diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 000000000..cb4371175 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,637 @@ +version: 2.1 + +parameters: + docker-img: + type: 'string' + default: '' + +commands: + timeout: + parameters: + duration: + default: '10m' + type: 'string' + steps: + - run: + name: Cancel job after <> + background: true + command: | + sleep <> + echo "Cancelling job as <> has elapsed" + curl --fail -X POST -H "Circle-Token: ${CIRCLE_TOKEN}" "https://circleci.com/api/v1.1/project/github/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BUILD_NUM}/cancel" + install-sdk: + parameters: + sdk: + type: 'string' + version: + type: 'string' + steps: + - restore_cache: + key: sdk-{{ .Environment.CIRCLE_JOB }}-{{ arch }}-<>-<> + - run: + name: Install SDK + command: | + curl -s "https://get.sdkman.io" | bash + source "$HOME/.sdkman/bin/sdkman-init.sh" + sdk version + sdk install <> <> + sdk default <> <> + sdk use <> <> + echo '### SDKMAN ###' >> "$BASH_ENV" + echo 'export SDKMAN_DIR="$HOME/.sdkman"' >> "$BASH_ENV" + echo '[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"' >> "$BASH_ENV" + source "$BASH_ENV" + - save_cache: + key: sdk-{{ .Environment.CIRCLE_JOB }}-{{ arch }}-<>-<> + paths: + - ~/.sdkman + start-db: + parameters: + docker-img: + type: 'string' + default: 'docker.io/arangodb/enterprise:latest' + topology: + type: 'string' + default: 'single' + ssl: + type: 'string' + default: 'false' + compression: + type: 'string' + default: 'false' + steps: + - run: + name: Start Database + command: ./docker/start_db.sh + environment: + DOCKER_IMAGE: <> + STARTER_MODE: <> + STARTER_DOCKER_IMAGE: 'docker.io/arangodb/arangodb-starter:0.18.5' + SSL: <> + COMPRESSION: <> + install: + steps: + - run: + name: mvn install + command: mvn install -Dmaven.test.skip -Dgpg.skip -Dmaven.javadoc.skip + report: + parameters: + working_directory: + type: 'string' + default: 
'.' + steps: + - run: + name: Create reports + command: mvn surefire-report:failsafe-report-only + working_directory: <> + - store_artifacts: + path: <>/target/site + load_cache: + steps: + - run: + name: Generate Cache Checksum + command: find . -name 'pom.xml' | sort | xargs cat > /tmp/maven_cache_seed + - restore_cache: + key: maven-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} + store_cache: + steps: + - save_cache: + key: maven-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} + paths: + - ~/.m2/repository + config_gpg: + steps: + - run: + name: Configure GPG + command: echo $GPG_PRIVATE_KEY | base64 --decode | gpg --batch --no-tty --import --yes + deploy: + steps: + - run: + name: Deploy to Apache Maven Central + command: mvn -s .circleci/maven-release-settings.xml -Dmaven.test.skip deploy + +executors: + j17: + docker: + - image: 'cimg/openjdk:17.0' + j21: + docker: + - image: 'cimg/openjdk:21.0' + j23: + docker: + - image: 'cimg/openjdk:23.0' + +jobs: + + test: + parameters: + docker-img: + type: 'string' + default: 'docker.io/arangodb/enterprise:latest' + topology: + type: 'string' + default: 'single' + jdk: + type: 'string' + default: 'j21' + args: + type: 'string' + default: '' + ssl: + type: 'string' + default: 'false' + native: + type: 'string' + default: 'false' + graalvm-version: + type: 'string' + default: '21.0.2-graalce' + resource_class: + type: 'string' + default: 'medium' + executor: <> + resource_class: <> + steps: + - timeout + - checkout + - setup_remote_docker + - when: + condition: + equal: [ 'true', <> ] + steps: + - install-sdk: + sdk: 'java' + version: <> + - start-db: + docker-img: <> + topology: <> + ssl: <> + - run: + name: Start proxy + command: ./docker/start_proxy.sh + - load_cache + - run: + name: mvn dependency:tree + command: | + mvn dependency:tree -am -pl test-functional \ + -Dssl=<> \ + -Dnative=<> \ + <> + - run: + name: Test + command: | + mvn verify -am -pl test-functional 
-Dgpg.skip -Dmaven.javadoc.skip \ + -Dssl=<> \ + -Dnative=<> \ + <> + - report: + working_directory: test-functional + - store_cache + + # DE-847 + # https://issues.apache.org/jira/browse/MSHADE-206 + # https://issues.apache.org/jira/browse/MNG-5899 + test-shaded: + parameters: + docker-img: + type: 'string' + default: 'docker.io/arangodb/enterprise:latest' + topology: + type: 'string' + default: 'single' + jdk: + type: 'string' + default: 'j21' + args: + type: 'string' + default: '' + ssl: + type: 'string' + default: 'false' + native: + type: 'string' + default: 'false' + graalvm-version: + type: 'string' + default: '21.0.2-graalce' + resource_class: + type: 'string' + default: 'medium' + executor: <> + resource_class: <> + steps: + - timeout + - checkout + - setup_remote_docker + - when: + condition: + equal: [ 'true', <> ] + steps: + - install-sdk: + sdk: 'java' + version: <> + - start-db: + docker-img: <> + topology: <> + ssl: <> + - run: + name: Start proxy + command: ./docker/start_proxy.sh + - load_cache + - install + - run: + name: mvn dependency:tree + working_directory: test-functional + command: | + mvn dependency:tree \ + -Dshaded \ + -Dssl=<> \ + -Dnative=<> \ + <> + - run: + name: Test + working_directory: test-functional + command: | + mvn verify -Dgpg.skip \ + -Dshaded \ + -Dssl=<> \ + -Dnative=<> \ + <> + - report: + working_directory: test-functional + - store_cache + + test-non-func: + executor: 'j21' + steps: + - timeout + - checkout + - setup_remote_docker + - install-sdk: + sdk: 'java' + version: '21.0.2-graalce' + - start-db + - load_cache + - run: + name: mvn dependency:tree + command: mvn dependency:tree -am -pl test-non-functional + - run: + name: Test + command: mvn verify -am -pl test-non-functional -Dgpg.skip -Dmaven.javadoc.skip + - report: + working_directory: test-non-functional + - store_cache + + # DE-847 + # https://issues.apache.org/jira/browse/MSHADE-206 + # https://issues.apache.org/jira/browse/MNG-5899 + test-non-func-shaded: 
+ executor: 'j21' + steps: + - timeout + - checkout + - setup_remote_docker + - install-sdk: + sdk: 'java' + version: '21.0.2-graalce' + - start-db + - load_cache + - install + - run: + name: mvn dependency:tree + working_directory: test-non-functional + command: mvn dependency:tree -Dshaded + - run: + name: Test + working_directory: test-non-functional + command: mvn verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded + - report: + working_directory: test-non-functional + - store_cache + + sonar: + executor: 'j21' + resource_class: 'large' + steps: + - timeout + - checkout + - setup_remote_docker + - start-db + - load_cache + - restore_cache: + name: Restore Sonar cache + key: sonar-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} + - run: + name: Test + command: mvn verify -am -pl test-functional -Pstatic-code-analysis -Dgpg.skip -Dmaven.javadoc.skip + - run: + name: Analyze + command: mvn verify -Pstatic-code-analysis -Dmaven.test.skip -Dgpg.skip -Dmaven.javadoc.skip org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=arangodb_arangodb-java-driver + - save_cache: + name: Save Sonar cache + key: sonar-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} + paths: + - ~/.sonar/cache + - store_cache + + tutorial: + executor: 'j21' + steps: + - timeout + - checkout + - setup_remote_docker + - start-db + - load_cache + - run: + name: mvn install + command: mvn install -Dmaven.test.skip -Dgpg.skip -am -pl driver + - run: + name: Run Maven + command: mvn compile exec:java -Dexec.mainClass=FirstProject + working_directory: tutorial/maven + - run: + name: Run Gradle + command: gradle run + working_directory: tutorial/gradle + - store_cache + + resilience-test: + executor: 'j21' + resource_class: 'large' + steps: + - timeout + - checkout + - setup_remote_docker + - start-db: + topology: 'cluster' + compression: 'true' + - load_cache + - run: + name: Start Toxiproxy + command: ./bin/startProxy.sh + working_directory: 
test-resilience + background: true + environment: + TOXIPROXY_VERSION: v2.9.0 + - run: + name: mvn dependency:tree + command: mvn dependency:tree -am -pl test-resilience + - run: + name: Test + command: mvn verify -am -pl test-resilience -Dgpg.skip -Dmaven.javadoc.skip + - report: + working_directory: test-resilience + - store_cache + + # DE-847 + # https://issues.apache.org/jira/browse/MSHADE-206 + # https://issues.apache.org/jira/browse/MNG-5899 + resilience-test-shaded: + executor: 'j21' + resource_class: 'large' + steps: + - timeout + - checkout + - setup_remote_docker + - start-db: + topology: 'cluster' + compression: 'true' + - load_cache + - install + - run: + name: Start Toxiproxy + command: ./bin/startProxy.sh + working_directory: test-resilience + background: true + environment: + TOXIPROXY_VERSION: v2.9.0 + - run: + name: mvn dependency:tree + working_directory: test-resilience + command: mvn dependency:tree -Dshaded + - run: + name: Test + working_directory: test-resilience + command: mvn verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded + - report: + working_directory: test-resilience + - store_cache + + deploy: + executor: 'j17' + steps: + - timeout: + duration: '30m' + - checkout + - load_cache + - config_gpg + - deploy + - store_cache + +workflows: + test-adb-version: + when: + not: <> + jobs: + - test: + name: test-single-<> + matrix: + parameters: + docker-img: + - 'docker.io/arangodb/arangodb:3.11' + - 'docker.io/arangodb/arangodb:3.12' + - 'docker.io/arangodb/enterprise:3.11' + - 'docker.io/arangodb/enterprise:3.12' + topology: + - 'single' + args: + - '-DenableSlowTests=true' + - test: + name: test-cluster-<> + matrix: + parameters: + docker-img: + - 'docker.io/arangodb/arangodb:3.11' + - 'docker.io/arangodb/arangodb:3.12' + - 'docker.io/arangodb/enterprise:3.11' + - 'docker.io/arangodb/enterprise:3.12' + topology: + - 'cluster' + args: + - '-DenableSlowTests=true' + + test-adb-topology: + when: <> + jobs: + - test: + name: test-<> + matrix: + 
parameters: + docker-img: + - <> + topology: + - 'single' + - 'cluster' + args: + - '-DenableSlowTests=true' + - test: + name: test-ssl + matrix: + parameters: + docker-img: + - <> + ssl: + - 'true' + + test-func: + when: + not: <> + jobs: + - test: + name: test-ssl=<> + matrix: + parameters: + ssl: + - 'true' + - 'false' + - test-shaded: + name: test-shaded-ssl=<> + matrix: + parameters: + ssl: + - 'true' + - 'false' + - test: + name: test-jdk=<> + matrix: + parameters: + jdk: + - 'j17' + - 'j21' + - 'j23' + filters: + tags: + only: /^v.*/ + branches: + only: + - main + - next + - test: + name: test-jackson-<> + matrix: + parameters: + args: + - '-Dadb.jackson.version=2.19.0' + - '-Dadb.jackson.version=2.18.3' + - '-Dadb.jackson.version=2.17.3' + - '-Dadb.jackson.version=2.16.2' + - '-Dadb.jackson.version=2.15.4' + - '-Dadb.jackson.version=2.14.3' + - '-Dadb.jackson.version=2.13.5' + - '-Dadb.jackson.version=2.12.7' + - '-Dadb.jackson.version=2.11.4' + - '-Dadb.jackson.version=2.10.5' + filters: + tags: + only: /^v.*/ + branches: + only: + - main + - next + - test: + name: test-native-ssl=<>-<> + matrix: + parameters: + native: + - 'true' + resource_class: + - '2xlarge' + ssl: + - 'true' + - 'false' + graalvm-version: + - '21.0.2-graalce' + filters: + tags: + only: /^v.*/ + branches: + only: + - main + - next + - test-shaded: + name: test-native-shaded-ssl=<>-<> + matrix: + parameters: + native: + - 'true' + resource_class: + - '2xlarge' + ssl: + - 'true' + - 'false' + graalvm-version: + - '21.0.2-graalce' + filters: + tags: + only: /^v.*/ + branches: + only: + - main + - next + - test: + name: test-activefailover-<> + matrix: + parameters: + docker-img: + - 'docker.io/arangodb/arangodb:3.11' + - 'docker.io/arangodb/enterprise:3.11' + topology: + - 'activefailover' + filters: + tags: + only: /^v.*/ + branches: + only: + - main + - next + + test-non-func: + when: + not: <> + jobs: + - test-non-func: + name: test-non-func + - test-non-func-shaded: + name: 
test-non-func-shaded + + sonar: + when: + not: <> + jobs: + - sonar: + name: sonar + + tutorial: + when: + not: <> + jobs: + - tutorial + + resilience-test: + when: + not: <> + jobs: + - resilience-test: + name: resilience-test + - resilience-test-shaded: + name: resilience-test-shaded + + deploy: + jobs: + - deploy: + context: java-release + filters: + tags: + only: /^deploy.*/ + branches: + ignore: /.*/ diff --git a/.circleci/maven-release-settings.xml b/.circleci/maven-release-settings.xml new file mode 100644 index 000000000..d8e10fc5d --- /dev/null +++ b/.circleci/maven-release-settings.xml @@ -0,0 +1,25 @@ + + + + + central + + true + + + ${env.GPG_KEYNAME} + ${env.GPG_PASSPHRASE} + + + + + + + central + ${env.CENTRAL_USERNAME} + ${env.CENTRAL_PASSWORD} + + + + diff --git a/.gitignore b/.gitignore index fef573878..159df0dc0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,14 @@ -/.classpath -/.project -/.settings -/target -/.idea -/*.iml +.classpath +.project +.settings +target +.idea +*.iml +.directory + +test-results-native +.flattened-pom.xml +/test-resilience/bin/toxiproxy-server-linux-amd64 + +dependency-reduced-pom.xml +/bin/ diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml new file mode 100644 index 000000000..239eea5d7 --- /dev/null +++ b/.mvn/extensions.xml @@ -0,0 +1,8 @@ + + + + org.apache.maven.extensions + maven-build-cache-extension + 1.2.0 + + \ No newline at end of file diff --git a/.mvn/maven-build-cache-config.xml b/.mvn/maven-build-cache-config.xml new file mode 100644 index 000000000..50f8adbd2 --- /dev/null +++ b/.mvn/maven-build-cache-config.xml @@ -0,0 +1,32 @@ + + + + + + + .flattened-pom.xml + dependency-reduced-pom.xml + + + + + + + + + + + + + + + + + classes + + + + + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b029a227f..000000000 --- a/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: java - -before_script: - - chmod 777 ./tests/travis/setup_arangodb.sh - - ./tests/travis/setup_arangodb.sh - 
-after_script: - - killall -9 arangod_x86_64 - -install: mvn install -DskipTests=true -Dgpg.skip=true -Dmaven.javadoc.skip=true -B -V - -jdk: - - oraclejdk8 - -sudo: false diff --git a/ChangeLog b/ChangeLog deleted file mode 100644 index 00bc8e42a..000000000 --- a/ChangeLog +++ /dev/null @@ -1,454 +0,0 @@ -v4.3.4 (2018-xx-xx) ---------------------------- -* made ErrorEntity serializable (Issue #178) -* fixed VelocyStream multi-thread authentication bug - -v4.3.3 (2018-02-01) ---------------------------- -* added CollectionCreateOptions.distributeShardsLike(String) (Issue #170) -* fixed inconsistency of getDocument() variants (Issue #168) -* added AqlQueryOptions.memoryLimit(Long) -* added AqlQueryOptions.failOnWarning(Boolean) -* added AqlQueryOptions.maxTransactionSize(Long) -* added AqlQueryOptions.maxWarningCount(Long) -* added AqlQueryOptions.intermediateCommitCount(Long) -* added AqlQueryOptions.intermediateCommitSize(Long) -* added AqlQueryOptions.satelliteSyncWait(Double) -* added AqlQueryOptions.skipInaccessibleCollections(Boolean) -* added TransactionOptions.maxTransactionSize(Long) -* added TransactionOptions.intermediateCommitCount(Long) -* added TransactionOptions.intermediateCommitSize(Long) -* added QueryEntity.getBindVars(): Map -* added QueryEntity.getState(): QueryExecutionState - -v4.3.2 (2017-11-30) ---------------------------- -* fixed redirect header (uppercase) - -v4.3.1 (2017-11-27) ---------------------------- -* fixed default Json parsing, include null values (Issue #163) -* fixed Json parsing of negative long (Issue #151) - -v4.3.0 (2017-11-23) ---------------------------- -* added load balancing (ArangoDB.Builder.loadBalancingStrategy()) -* added automatic acquiring of hosts for load balancing or as fallback (ArangoDB.Builder.acquireHostList()) - -v4.2.7 (2017-11-03) ---------------------------- -* added ArangoGraph.exists() -* fixed deserialization of BigDecimal - -v4.2.6 (2017-10-23) ---------------------------- -* issue #151: fixed 
de-/serialization of negative int values -* exclude junit dependency of json-simple - -v4.2.5 (2017-10-16) ---------------------------- -* issue #146: added ArangoCollection.exists() -* added ArangoDatabase.exists() -* issue #152: added BaseDocument.setId(String) -* added GraphCreateOptions.replicationFactor(Integer) -* issue #156: ArangoDB.shutdown() now closes all connections - -v4.2.4 (2017-09-04) ---------------------------- -* fixed ArangoDatabase.transaction(): ignore null result -* added properties validation arangodb.host -* added ArangoCollection.ensureIndex() -* made ArangoCollection.createIndex() deprecated -* let ArangoCursor implement Iterable -* fixed issue #145: ArangoCollection.updateDocument() -* fixed issue #145: ArangoVertexCollection.updateVertex() -* fixed issue #145: ArangoEdgeCollection.updateEdge() - -v4.2.3 (2017-07-31) ---------------------------- -* added ArangoDatabase.getPermissions(String) -* added ArangoCollection.getPermissions(String) -* added ArangoDB.grantDefaultDatabaseAccess(String, Permissions) -* added ArangoDB.grantDefaultCollectionAccess(String, Permissions) -* added ArangoDatabase.grantDefaultCollectionAccess(String, Permissions) -* fixed DateUtil (thread-safe) - -v4.2.2 (2017-07-20) ---------------------------- -* added ArangoDatabase.grantAccess(String, Permissions) -* added ArangoCollection.grantAccess(String, Permissions) -* added ArangoDatabase.resetAccess(String) -* added ArangoCollection.resetAccess(String) -* added ArangoDB.updateUserDefaultDatabaseAccess(String, Permissions) -* added ArangoDB.updateUserDefaultCollectionAccess(String, Permissions) -* added ArangoDatabase.updateUserDefaultCollectionAccess(String, Permissions) -* added ArangoCollection.getDocuments(Collection, Class) -* added connection/handshake retry on same host -* added deduplicate field for hash/skiplist index - -v4.2.1 (2017-06-20) ---------------------------- -* fixed deserializing of internal field _id - -v4.2.0 (2017-06-14) 
---------------------------- -* added ArangoDBVersion.getLicense() -* added ArangoDB.getRole() -* added ArangoDBException.getException() -* added protocol switch (ArangoDB.Builder.useProtocol(Protocol)) - * Protocol.VST = VeclocyStream (default) - * Protocol.HTTP_JSON = Json over HTTP - * Protocol.HTTP_VPACK = VelocyPack over HTTP - -v4.1.12 (2017-04-13) ---------------------------- -* added ArangoDatabase.cursor() (issue #116) -* optimized ArangoDB.Builder for better multi thread support -* fixed VPackSlice float/double bug - -v4.1.11 (2017-03-24) ---------------------------- -* fixed exception handling in Connection (issue #110) -* fixed NPE in ArangoCursor (issue #112) -* extracted VelocyPack implementation to https://github.com/arangodb/java-velocypack -* added convenience methods (ArangoDatabase.arango(), ArangoCollection.db(), ArangoGraph.db()) -* added convenience methods (ArangoCollection.getIndex(String), .deleteIndex(key)) -* added connection pooling (issue #103) -* added extension point for VelocyPack serialization (ArangoDB.registerModule()) -* added support for replacing build-in VelocyPack serializer/deserializer -* added ArangoDatabase.getVersion(), ArangoDatabase.getAccessibleDatabases() - -v4.1.10 (2017-02-22) ---------------------------- -* changed velocystream message sending to async -* changed return value of getVertex/getEdge to null if not exists -* added support for multiple hosts as fallbacks -* added support serializing collections with null elements -* added support serializing non-generic classes that extend collections -* added support serializing/deserializing byte and Byte -* added default value "root" for user -* fixed serialization of additionalFields for objects and maps -* fixed VPack parsing (arrays of specific length) - -v4.1.9 (2017-02-10) ---------------------------- -* added missing IndexType.edge -* fixed Connection (thread-safe) -* fixed URI encoding - -v4.1.8 (2017-02-03) ---------------------------- -* changed 
java.util.Date serialization from VPack.date to VPack.string (ISO 8601) -* changed java.sql.Date serialization from VPack.date to VPack.string (ISO 8601) -* changed java.sql.Timestamp serialization from VPack.date to VPack.string (ISO 8601) -* added byte[] de-/serialization from/to VPack.string (Base64) -* added ArangoCollection.drop(isSystem) -* improved ArangoDBException with responseCode, errorNum, errorMessage -* changed ArangoCollection.deleteDocuments() to work with keys and documents -* fixed URL encoding bug (#97) -* fixed update/replaceDocumets with Json (#98) - -v4.1.7 (2017-01-26) ---------------------------- -* fixed importDocuments, insertDocuments to work with raw Jsons (issue #91) - -v4.1.6 (2017-01-18) ---------------------------- -* added serializer support for enclosing types - -v4.1.5 (2017-01-12) ---------------------------- -* fixed VPack String serialization (UTF-8 encoding) -* fixed VPack parsing of fields of type Object -* fixed VPack serializing of array with null values (issue #88) -* added configuration for custom annotations within VPack de-/serialization -* added support of transient modifier within VPack de-/serialization - -v4.1.4 (2016-12-19) ---------------------------- -* added VPack serializer/de-serializer for java.util.UUID -* fixed VPack parsing (issue #65, #80, #82) - -v4.1.3 (2016-11-22) ---------------------------- -* fixed error while serializing long values with VPackBuilder -* added bulk import API - -v4.1.2 (2016-11-10) ---------------------------- -* fixed GraphEntity for ArangoDatabase.getGraphs() (field name is null) -* added VelocyPack UTC_DATE parsing to Json String (ISO 8601) -* added configuration methods for VPackParser in ArangoDB.Builder -* added VPackJsonSerializer for VPackParser - -v4.1.1 (2016-11-09) ---------------------------- -* changed json parsing of VelocyPack types not known in json -* fixed VelocyPack bug with non-ASCII characters -* added missing replicationFactor in CollectionCreateOptions -* 
added missing replicationFactor in CollectionPropertiesEntity -* added option serializeNull in DocumentUpdateOptions - -v4.1.0 (2016-10-28) ---------------------------- -* changed VelocyStream communication (send protocol header) -* added ArangoUtil for manually de-/serialization - -v4.0.0 (2016-10-17) ---------------------------- -* replaced API -* replaced protocol http with VelocyStream -* added VelocyPack support -* added multi document operations (insert, delete, update, replace) - -v3.1.0 (2016-10-17) ---------------------------- -* removed Methods with collectionId (long) from ArangoDriver (Id is only for internal usage) -* changed Revision from long to String -* removed Methods with documentId (long) from ArangoDriver -* added profile flag to AqlQueryOptions (issue #47) - -v3.0.4 (2016-10-17) ---------------------------- -* fixed edges deserializer (issue #50) - -v3.0.3 (2016-09-12) ---------------------------- -* added error handling in getBatchResponseByRequestId() -* added function createPersistentIndex() (issue #48) -* added deserializer for BaseDocument (issue #50) - -v3.0.2 (2016-08-05) ---------------------------- -* added profile flag to AqlQueryOptions (issue #47) -* added getExtra() to DocumentCursor<> (issue #47) -* added IndexType.PERSISTENT (issue #48) - -v3.0.1 (2016-07-08) ---------------------------- -* added flag complete and details in ImportOptions -* fixed issue #43 (ArangoDriver.getAqlFunctions(String) does not uses the defaultDatabase setting) - -v3.0.0 (2016-06-17) ---------------------------- -* removed ArangoDriver.EdgeEntity() (/_api/edge withdrawn in Server) -* removed CAP-Index (Cap-constraints are withdrawn in Server) -* removed Param database in User-Methods (in 3.0 users are managed in _users Collection in _system Database only) -* added User-Method grantDatabaseAccess(username, database) -* replaced Graph-Functions (graph_edge, graph_vertices, graph_shortes_path) with AQL -* removed deprecated Methods -* added Transaction 
attribute allowImplicit -* refactored QueryCachePropertiesEntity, TransactionResultEntity - -v2.7.4 (2016-04-15) ---------------------------- -* fixed issue #35 (There is no replaceDocumentRaw similar to createDocumentRaw) - -v2.7.3 (2016-03-25) ---------------------------- -* batch driver performance fix -* fixed issue #33 (typo in ArangoDriver.executeAqlQueryWithDocumentCursorResutl method) - -v2.7.2 (2016-01-22) ---------------------------- -* added executeAqlQueryRaw(...). Example src/test/java/com/arangodb/example/document/RawDocumentExample.java - -v2.7.1 (2016-01-21) ---------------------------- -* added examples for new AQL traversal functions (since ArangoDB 2.8) -* added AQL warnings to CursorResult (hasWarning() and getWarnings()) -* added createDocumentRaw(...) and getDocumentRaw(...). Examples src/test/java/com/arangodb/example/document/RawDocumentExample.java -* Updated dependencies gson (2.5), httpclient (4.5.1) and slf4j-api (1.7.13) - -v2.7.0 (2015-11-20) ---------------------------- -* added document examples in src/test/java/com/arangodb/example/document/ -* added graph examples in src/test/java/com/arangodb/example/document/ -* new function executeAqlQueryJSON(): Executes an AQL query and returns the raw JSON response as a String -* initial support of HTTPS connections. 
Examples src/test/java/com/arangodb/example/ssl/ - - -v2.6.9 (2015-10-16) ---------------------------- -* add support API: /_api/query-cache/properties (AQL query tracking properties: setQueryTrackingProperties(), getQueryTrackingProperties()) -* add support API: /_api/query-cache (delete AQL query cache: deleteQueryCache()) -* add support API: /_api/query/current (currently running AQL queries: getCurrentlyRunningQueries()) -* add support API: /_api/query/slow (slow AQL queries: getSlowQueries(), deleteSlowQueries()) -* add support API: /_api/query (kill AQL queries: killQuery()) -* added: boolean exists(long collectionId, long documentId) -* added: boolean exists(String collectionName, long documentId) -* added: boolean exists(long collectionId, String documentKey) throws ArangoException { -* added: boolean exists(String collectionName, String documentKey) -* added: boolean exists(String documentHandle) - -v2.6.8 (2015-09-25) ---------------------------- -* fixed GRAPH_EDGES() 2.6-incompatibility - -v2.5.6 (2015-07-04) ---------------------------- -* fixed issue #19 - * createEdge takes graphName but needs database name -* ArangoDB 2.6 support - -v2.5.5 (2015-05-23) ---------------------------- -* updated driver.getTraversal(...); - * removed VisitedEntity (Traversal) - * added TraversalQueryOptions to avoid too many parameters -* added an examples for Transaction API - (see src/test/java/com/arangodb/example/TransactionExample.java) -* fixed issue #17 - * Changed TransactionEntity.ReadWriteCollections to a static class - -v2.5.4 (2015-05-03) ---------------------------- -* fixed issue #12 - * added auto reconnection when connection breaks - * added fallback server endpoints -* added new cursor implementation for AQL queries - * DocumentCursor executeDocumentQuery(...) - * VertexCursor executeVertexQuery(...) - * EdgeCursor executeEdgeQuery(...) -* added new cursor implementation for simple queries - * DocumentCursor executeSimpleAllDocuments(...) 
- * DocumentCursor executeSimpleByExampleDocuments(...) - * DocumentCursor executeSimpleRangeWithDocuments(...) - * DocumentCursor executeSimpleFulltextWithDocuments(...) -* added some examples for AQL queries - (see src/test/java/com/arangodb/example) - -v2.5.3 (2015-03-29) ---------------------------- -* fixed issue #9 - * added method to driver.getTraversal(...); - -v2.5.0 ---------------------------- -Added support for sparse indexes - -v2.4.4 ---------------------------- -* fixed issue #5 - * added method to driver.createGraph(GraphEntity g); -* fixed issue #6 -* fixed issue #7 - -v2.4.3 ---------------------------- -* Fixed a graph bug: when retrieving a graph via the driver, "from" and "to" were emtpy. This is fixed now. -* GraphEntity has been changed, so that edge definitions are stored in an EdgeDefinitionsEntity. -* Some additional methods in GraphEntity: - * public EdgeDefinitionsEntity getEdgeDefinitionsEntity() - * public void setEdgeDefinitionsEntity(EdgeDefinitionsEntity edgeDefinitionsEntity) -* Some additional methods in EdgeDefinitionsEntity: - * public int getSize() - * public EdgeDefinitionEntity getEdgeDefinition(String collectionName) - -v2.4.2 ---------------------------- -Fixed issue#2 - -v2.4.1 ---------------------------- -httpclient version 4.3.6 - -v1.4.1(2014-02-04) ---------------------------- -* rename attribute: UserEntity#user -> UserEntity#username -* add support API: GET /_api/database/user (getDatabases) -* modify API: createDatabase(db) -> createDatabase(db, users...) -* add debug property: enableCURLLogger -* add Annotation @DocumentKey and support user define document key. -* Bug fixed: raise error if _key is not number. -* document API support _key. -* Fixed OraacleJDK build error.(#11) - -v1.4.0(2013-11-26) ---------------------------- -* support database (for 1.4 feature) -* stop the API of edge, edges in version 1.4.0 -* stop the API of kvs(/_api/key) in version 1.4.0(because server does not support yet.) 
-* add support API: /_api/database/* -* add configure: defaultDatabase -* add support API: /_admin/statistics (getStatistics) -* add support API: /_admin/statistics-description (statisticsDescription) -* add support API: /_api/endpoint/* -* add support API: /_api/collection/{collection-name}/checksum (getCollectionChecksum) -* add support API: /_api/example/first -* add support API: /_api/example/last -* add support API: /_api/replication/inventory (getReplicationInventory) -* add support API: /_api/replication/dump (getReplicationDump) -* add support API: /_api/replication/server-id (getReplicationServerId) -* add support API: /_api/replication/logger-start (startReplicationLogger) -* add support API: /_api/replication/logger-stop (stopReplicationLogger) -* add support API: GET /_api/replication/logger-state (getReplicationLoggerState) -* add support API: GET /_api/replication/logger-config (getReplicationLoggerConfig) -* add support API: PUT /_api/replication/logger-config (setReplicationLoggerConfig) -* add support API: GET /_api/replication/applier-config (getReplicationApplierConfig) -* add support API: PUT /_api/replication/applier-config (setReplicationApplierConfig) -* add support API: PUT /_api/replication/applier-start (startReplicationApplier) -* add support API: PUT /_api/replication/applier-stop (stopReplicationApplier) -* add support API: GET /_api/replication/applier-state (getReplicationApplierState) -* add support API: POST /_admin/execute (executeScript) -* add support API: POST /_api/graph (createGraph) -* add support API: GET /_api/graph (getGraphs) -* add support API: GET /_api/graph/{graph-name} (getGraph) -* add support API: DELETE /_api/graph/{graph-name} (deleteGraph) -* add support API: POST /_api/graph/{graph-name}/vertex (createVertex) -* add support API: GET /_api/graph/{graph-name}/vertex/{document-key} (getVertex) -* add support API: DELETE /_api/graph/{graph-name}/vertex/{document-key} (deleteVertex) -* add support API: PUT 
/_api/graph/{graph-name}/vertex/{document-key} (replaceVertex) -* add support API: PATCH /_api/graph/{graph-name}/vertex/{document-key} (updateVertex) -* add support API: POST /_api/graph/{graph-name}/vertices (getVertices) -* add support API: POST /_api/graph/{graph-name}/vertices/{vertex-key} (getVertices) -* add support API: POST /_api/graph/{graph-name}/edge (createEdge) -* add support API: GET /_api/graph/{graph-name}/edge/{edge-key} (getEdge) -* add support API: DELETE /_api/graph/{graph-name}/edge/{edge-key} (deleteEdge) -* add support API: PUT /_api/graph/{graph-name}/edge/{edge-key} (replaceEdge) -* add support API: POST /_api/graph/{graph-name}/edges (getEdges) -* add support API: POST /_api/graph/{graph-name}/edges/{vertex-key} (getEdges) -* add attribute: CollectionEntity#checksum -* add attribute: CollectionEntity#doCompact -* add attribute: CollectionEntity#keyOptions -* add attribute: CollectionEntity.Figures#(compactorsCount,compactorsFileSize,shapefilesCount,shapefilesFileSize,shapesCount,attributesCount) -* add doCompact to argument of createCollection -* remove attribute: CollectionEntity#createOptions -* getDocuments was changed to return document-handle -* rename method: updateDocument -> replaceDocument -* rename method: partialUpdateDocument -> updateDocument -* changed the version API endpoint. (/_admin/version -> /_api/version) -* changed into createOptions to keyOptions of collection API -* fixed ArangoUnixTime bug. -* remove: getServerStatus -* remove: getConnectionStatistics -* gson library upgrade to 2.2.4 -* refactoring deserialize of parameterized entity class. 
- -v1.2.2(2013-07-10) ---------------------------- - -* add support API: /_api/explain -* add support API: /_api/collection/collection-name/revision -* add support API: /_api/index of fulltext -* add support API: /_api/simple/fulltext -* add support API: /_admin/modules/flush -* add support API: /_admin/routing/reload -* add support API: User Management -* add support: Basic Authentication -* /_api/simple/all and /_api/simple/by-example returns DocumentEntity -* add support import API - -v1.2.1(2013-07-02) ------------------- - -* Add support: load configure from property-file in classpath. -* Add configure: timeout, connectionTimeout, retryCount. -* Remove configure: autoUnknownCollections. -* Change google-gson scope in pom.xml -* Change logback-classic in pom.xml -* Remove README.JA - -v1.2.0(2013-06-30) ------------------- -* Initial Release. diff --git a/ChangeLog.md b/ChangeLog.md new file mode 100644 index 000000000..3a55992a5 --- /dev/null +++ b/ChangeLog.md @@ -0,0 +1,1640 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [7.22.0] - 2025-08-06 + +- wildcard generic AQL bind vars (#612, DE-991) + +## [7.21.0] - 2025-07-23 + +- added SSL configuration properties (DE-1010, #611) +- fixed support to Jackson `2.19` + +## [7.20.0] - 2025-06-17 + +- added option `usePlanCache` to `AqlQueryOptions` (DE-973, #609) +- updated Jackson version to `2.19` (DE-1012, #607) + +## [7.19.0] - 2025-05-28 + +- fixed connection pool load-balancing (DE-1016, #602), now the connection pool: + - keeps track of busy connections (or busy HTTP/2 streams) + - enqueues new requests only to connections that are not busy (or that have available HTTP/2 streams) + - waits asynchronously if all the connections are busy (or all HTTP/2 streams are busy) +- added new option to configure HTTP/1.1 pipelining (`com.arangodb.ArangoDB.Builder.pipelining(Boolean)`), + `false` by default +- changed default configuration HTTP/1.1 pipelining to `false` + +## [7.18.0] - 2025-05-06 + +- updated `jackson-dataformat-velocypack` to version `4.6.0` +- exposed configuration properties keys in `ArangoConfigProperties` +- deprecated `CollectionStatus` +- fixed `equals()` and `hashCode()` in some entity classes + +## [7.17.1] - 2025-03-27 + +- implemented `equals()` and `hashCode()` for all entity classes +- fixed overlapping resources in shaded package + +## [7.17.0] - 2025-01-27 + +- allow construct ArangoConfigProperties from `java.util.Properties` (DE-976) +- made BaseDocument and BaseEdgeDocument serializable (#596) + +## [7.16.0] - 2025-01-09 + +- improved deserialization of `RawBytes` and `RawJson` (#592, DE-969) +- added support to Jakarta JSON-P data types (#593, DE-968) +- fixed ArangoSearch `PrimarySort` serialization + +## [7.15.0] - 2024-12-10 + +- added missing collection options (#590, DE-961) +- improved serde performances (#588, DE-959) + +## [7.14.0] - 2024-12-06 + +- support all AQL query options in `ArangoDatabase.explainAqlQuery()` (#589, ES-2266) + +## [7.13.1] - 2024-11-29 + +- tolerate 
error responses with text content-type (#587, DE-960) + +## [7.13.0] - 2024-11-15 + +- improved serialization and deserialization of `RawBytes` and `RawJson` (#586) + +## [7.12.0] - 2024-11-07 + +- added new method `ArangoDatabase.explainAqlQuery()`, supporting arbitrary JSON-like response data +- deprecated `ArangoDatabase.explainQuery()` + +## [7.11.0] - 2024-10-31 + +- added support to HTTP proxies (#584, DE-930) + +## [7.10.0] - 2024-10-22 + +- updated Jackson to version `2.18` (#581, DE-877) +- added missing statistics to `CursorStats` (#580, DE-876) +- fixed type of `AqlExecutionExplainEntity.warnings` (#579, DE-886) + +## [7.9.0] - 2024-09-20 + +- updated `velocypack` to version `3.1.0` +- updated `jackson-dataformat-velocypack` to version `4.4.0` +- added `SHADED` flag in `PackageVersion` class (#576) +- added `serdeProviderClass` configuration property (#575, DE-837) +- added `skipFastLockRound` parameter to StreamTransactionOptions (#574, DE-832) +- added support to reset log levels (#573, DE-831) +- added `legacy` option to `GeoJSONAnalyzerProperties` (#572, DE-736) +- support resuming AQL cursor in transaction (#571, DE-592) +- fixed `HostHandler` concurrency (DE-663) +- fixed `ConnectionPoolImpl` concurrency (#570, DE-536) + +## [7.8.0] - 2024-09-02 + +- added property `ignoreRevs` to DocumentDeleteOptions (#567, DE-844) + +## [7.7.1] - 2024-06-12 + +- fixed deserialization of responses with no content (#560) + +## [7.7.0] - 2024-06-07 + +- added configuration option to set Vert.x instance (#558, DE-535) +- added overloaded variant of `ArangoSerde#deserialize()` accepting `RequestContext` parameter (#555, #554, DE-771) +- updated `jackson-dataformat-velocypack` to version `4.3.0` +- fixed support to Jackson 2.17 +- fixed native image build for GraalVM 22 + +## [7.6.0] - 2024-03-22 + +- added support to external versioning (ArangoDB 3.12, #547) +- added support to `wildcard` analyzer (ArangoDB 3.12, #546) +- added support to `multi_delimiter` analyzer 
(ArangoDB 3.12, #545) +- added support to multi dimensional indexes (ArangoDB 3.12, #544) +- added support to WAND optimization (ArangoDB 3.12, #543) +- added support to content compression (ArangoDB 3.12, #535) +- fixed ALPN with H2 (DE-792, #551) +- tolerate SPI ServiceConfigurationError (DE-793, #552) +- added support to Jackson 2.17 +- changed default TTL to 30 seconds for HTTP connections (DE-794, #553) + +## [7.5.1] - 2024-01-24 + +- fixed inclusion of transitive dependency on `com.tngtech.archunit:archunit-junit5` + + +## [7.5.0] - 2024-01-23 + +- updated Vert.x to version 4.5 (#532) +- automatically configure Jackson stream constraints (DE-762, #537) +- fixed closing AQL cursor twice (#533) + + +## [7.4.0] - 2023-12-20 + +### Added + +- added new methods to remove graph definitions and vertex collections, to align the naming with the documentation (DE-729) +- added support to Jackson 2.16 (DE-735) + +### Changed + +- deprecated ArangoDB.Builder.asyncExecutor() (DE-726) +- retry requests on response code 503 (DE-55, #530) +- changed `ArangoCursor#close()` and `ArangoCursorAsync#close()` to be idempotent (DE-727, #528) +- changed default Jackson dependencies versions to 2.16 (DE-735) + +### Fixed + +- fixed exception handling on sending HTTP requests +- fixed management of hosts marked for deletion (DE-723, #384) +- fixed VST resilience (#529, DE-725) +- fixed failover with round-robin load balancing (DE-724) +- fixed init cause of `ArangoDBException` + + +## [7.3.0] - 2023-11-22 + +- changed types of documents and errors in `com.arangodb.entity.MultiDocumentEntity` to `java.util.List` + +## [7.2.0] - 2023-11-02 + +- added asynchronous API, accessible via `ArangoDB.async()` (DE-496, #523) +- added configuration option to specify the asynchronous downstream executor via `ArangoDB.Builder#asyncExecutor(Executor)` (DE-697) +- added missing asynchronous API to ensure parity with synchronous API +- changed behavior for acquiring the host list to be asynchronous 
(#521) +- changed internal communication to be asynchronous +- fixed swallowed exceptions in `ArangoCollection.getDocument()`, `ArangoCollection#documentExists()`, `ArangoCollection#exists()`, `ArangoEdgeCollection.getEdge()` and `ArangoVertexCollection#getVertex()` +- fixed `ArangoCursorAsync` API to be fully asynchronous (#433, #520) +- fixed interference of Jackson annotations with other Jackson instances (DE-636, #513) +- fixed nested properties deserialization in `BaseDocument` (#517) + +## [7.1.0] - 2023-05-26 + +- added support to Jackson 2.15 +- changed default Jackson dependencies versions to 2.15 +- updated transitive dependencies versions +- added `peakMemoryUsage` attribute to running and slow queries (ArangoDB 3.11, #507) +- added support for retriable batch results (ArangoDB 3.11, #505) +- added support for ArangoSearch WAND optimization (ArangoDB 3.11, #503) +- added support for cloneable AqlQueryOptions (#510) +- added support for `geo_s2` analyzer (#501) +- added support for serverId query parameter for `/_admin/log/level` (#498) +- added support for peakMemoryUsage and executionTime explain stats (#496) +- added support for Index cache refilling (#494) +- added support for ArangoSearch column cache (#492) + +## [7.0.0] - 2023-04-20 + +Detailed changes documentation is available [here](https://github.com/arangodb/docs/blob/main/drivers/java-changes-v7.md). 
+ +### Added + +- added `ArangoDB.Builder.loadProperties(ArangoConfigProperties)` to register custom configuration suppliers +- added `ArangoConfigProperties.fromFile()` to load properties from local files +- added support to `HTTP/2` communication protocol +- added optional transitive dependency on `io.vertx:vertx-web-client` (can be excluded if using VST only) +- added transitive dependency on Jackson Core, Databind and Annotations +- added wrapper class for raw JSON content (`RawJson`) +- added wrapper class for content already encoded as byte array (`RawBytes`) +- added support for Jackson types (`JsonNode`, `ArrayNode`, `ObjectNode`, ...) +- added support for Jackson annotations in data types +- added new user data custom serializer API based on `ArangoSerde` +- added new user data custom serializer implementation based on Jackson (`JacksonSerde`), supporting both `JSON` and `VPACK` +- added methods and parameters targets to meta binding annotations +- added overloaded methods for CRUD operations allowing specifying the return type +- added API to support CRUD operations from raw data (`RawBytes` and `RawJson`) containing multiple documents +- added `BaseDocument#removeAttribute(String)` and `BaseEdgeDocument#removeAttribute(String)` +- added request id to `ArangoDBException` +- shaded version of the driver (`com.arangodb:arangodb-java-driver-shaded`) +- added `ArangoEdgeCollection.drop()` and `ArangoVertexCollection.drop(VertexCollectionDropOptions)` + +### Fixed + +- removed `--allow-incomplete-classpath` from native image configuration (#397) +- ability to control whether `null` values are included in the serialization (#389) +- added support to `DocumentCreateOptions#keepNull` (#374) +- allow specifying the return type on insertDocuments (#373) +- credentials logging (#410) +- fixed `ArangoCollection.rename()` and `ArangoView.rename()` thread safety + +### Changed + +- configuration properties from local files are not loaded automatically anymore +- 
`ArangoDB.execute()` accepts now target deserialization type +- `Request` and `Response` support now generic body type +- removed default host configuration (`127.0.0.1:8529`) +- changed http client library to Vert.x WebClient +- changed default communication protocol from `VST` to `HTTP/2` +- changed default content-type format from `VPACK` to `JSON` +- changed internal serialization, now based on Jackson API +- `VPACK` support is now provided by `JacksonSerde` including the optional dependency + `com.arangodb:jackson-dataformat-velocypack` (`VPACK` dataformat backend for Jackson) +- data objects passed as arguments to API methods are treated as immutable and the related metadata fields are not + updated in place anymore (updated metadata can be found in the returned object) +- changed some API signatures which were using unnecessary generics from `ArangoCollection`, `ArangoVertexCollection` and `ArangoEdgeCollection` +- changed `ArangoCursor#getStats()` return type +- replication factor is now represented by a new interface (`ReplicationFactor`) with + implementations: `NumericReplicationFactor` and `SatelliteReplicationFactor` +- all data definition classes are now `final` (packages `com.arangodb.entity` and `com.arangodb.model`) +- `BaseDocument` and `BaseEdgeDocument` are now `final` +- `BaseDocument#getProperties()` and `BaseEdgeDocument#getProperties()` return now an unmodifiable map +- `BaseDocument` and `BaseEdgeDocument` are not serializable anymore (using Java serialization) +- removed `throws ArangoDBException` from API method signatures (unchecked exception) +- removed passwords from debug level requests logs (#410) +- JPMS: explicit automatic module name +- updated `ArangoGraph.replaceEdgeDefinition()` +- CRUD methods to insert and replace multiple documents have now covariant argument types +- changed order of arguments in `ArangoDatabase.query()` overloads +- `ArangoCollection.rename()` and `ArangoView.rename()` do not change the collection or view 
name of the API class instance + +### Removed + +- removed user data custom serializer API based on `ArangoSerialization` (in favor of `ArangoSerde`) +- removed user data custom serializer implementation `ArangoJack` (in favor of `JacksonSerde`) +- removed support for interpreting raw strings as JSON (in favor of `RawJson`) +- removed support of data type `VPackSlice` (in favor of Jackson types: `JsonNode`, `ArrayNode`, `ObjectNode`, ...) +- removed client APIs already deprecated in Java Driver version `6` +- removed deprecated server APIs: + - `MMFiles` related APIs + - `ArangoDatabase.executeTraversal()` + - `ArangoDB.getLogs()` + - `minReplicationFactor` in collections and graphs + - `overwrite` flag in `DocumentCreateOptions` + - `hash` and `skipList` indexes +- removed `ArangoCursorInitializer` +- removed Asynchronous API (`com.arangodb.async`) +- removed `ArangoDatabase.getDocument()` +- removed automatic type inference in CRUD methods operating on multiple documents +- removed `DbName` in favor of plain strings + +## [6.23.0] - 2023-04-20 + +- deprecated `DbName` in favor of plain strings + +## [6.22.0] - 2023-04-18 + +- added support to `forceOneShardAttributeValue` query parameter (DE-541) + +## [6.21.0] - 2023-03-07 + +- added `x-arango-driver` header (DE-479) + +## [6.20.0] - 2022-11-29 + +- ArangoSearch cache (#472) +- support for `enterprise-hex-smart-vertex` shardingStrategy +- deprecated `com.arangodb.Function` + +## [6.19.0] - 2022-10-04 + +- added support for `search-alias` views (ArangoDB 3.10 #461) +- added support for nested search (ArangoDB 3.10, #460) +- added support for `classification`, `nearest_neighbors` and `minhash` search analyzers (ArangoDB 3.10, #458) +- added support for inverted indexes (ArangoDB 3.10, #457) +- added support for cluster dirty reads (ArangoDB 3.10, #455) +- added support for index stored values (ArangoDB 3.10) +- added support for geo index legacy polygons (ArangoDB 3.10) +- added support for getting query optimizer 
rules (ArangoDB 3.10) +- added support for enhanced cursor stats (ArangoDB 3.10) +- added support for computed values (ArangoDB 3.10) +- added support for index cache (ArangoDB 3.10) +- deprecated fulltext indexes (ArangoDB 3.10, #454) +- fixed `ConsolidationPolicy` API +- deprecated MMFiles collection attributes (#442) +- deprecated for removal `ArangoCursorInitializer` and `GraphDocumentReadOptions#isCatchException()` +- documented thread safe classes (#445) + +## [6.18.0] - 2022-06-07 + +- deprecated usage of deprecated server API (#440) +- introduced new field entity annotations: `@Id`, `@Key`, `@Rev`, `@From`, `@To` (#439) +- deprecated VPack serialization in favor of Jackson API +- added `deduplicate` option in `PersistentIndex` (#437) + +## [6.17.0] - 2022-05-17 + +- updated dependencies +- fixed IndexOutOfBoundsException in RoundRobinHostHandler (#435) +- warn on json request payload size too big (#434) +- fixed various serialization bugs in native image (#425) + +## [6.16.1] - 2022-02-23 + +- fixed retry behavior of HTTP connections in case of timeout exceptions (#429) +- fixed NPE when serializing ArangoSearch properties (#427) + +## [6.16.0] - 2022-01-27 + +- deprecated hash and skiplist indexes (#424) +- fixed active failover concurrency bug (#423) +- added support for overload metrics (ArangoDB 3.9, #419) +- added support for Segmentation and Collation ArangoSearch analyzers (ArangoDB 3.9, #418) +- added support for ZKD indexes (ArangoDB 3.9, #417) +- added `all` log topic (ArangoDB 3.9, #416) +- added support for Hybrid SmartGraphs (ArangoDB 3.9, #415) +- added support for database unicode names, added `DbName` class to represent database names in public API parameters to + ease unicode names normalization (ArangoDB 3.9, #405) + +## [6.15.0] - 2021-12-29 + +- JWT authentication (#421) +- fixed swallowing connection exceptions (#420) +- fixed `stopwords` analyzer (#414) +- set max retries for active failover redirects (#412) +- fixed deserializing 
`null` value as String (#411) + +## [6.14.0] - 2021-10-01 + +- fixed issues with non-English locales (#407) +- implemented support for `GET /_admin/server/id` +- fixed `acquireHostListInterval` javadoc, interval in milliseconds +- fixed NPE in `CursorEntity.extra` and `CursorEntity.Extras.stats` + +## [6.13.0] - 2021-08-25 + +- added support for `fillBlockCache` in AQL query options (ArangoDB v3.8.1) +- fixed exceptions handling during shutdown (#400) +- added native image configuration for HTTP communication +- updated native image reflection configuration + +## [6.12.3] - 2021-06-24 + +- fixed host handler failures count (#DEVSUP-805, #398) + +## [6.12.2] - 2021-06-17 + +- added missing enum value `QueryExecutionState.KILLED` (#391) +- fixed `acquireHostList` to loadBalancer or hostname alias (#385) + +**WARNING**: The implementation of Stopwords analyzer is not final in ArangoDB 3.8.0, so using it might result in unpredictable behavior. +This will be fixed in ArangoDB 3.8.1 and will have a different API. +Any usage of the current Java driver API related to it is therefore discouraged. 
+ +## [6.12.1] - 2021-04-28 + +- fixed request timeout in async driver (#ES-837) + +## [6.12.0] - 2021-04-28 + +- added support for modifying collection schema + +## [6.11.1] - 2021-04-23 + +- velocypack v2.5.3 + +## [6.11.0] - 2021-04-21 + +- added support for getting db log entries via `GET /_admin/log/entries` (ArangoDB v3.8) +- added support for index estimates (ArangoDB v3.8) +- added support for ArangoSearch `AQL`, `Pipeline`, `Stopwords`, `GeoJSON`, `GeoPoint` analyzers (ArangoDB v3.8) +- fixed active failover behavior for the asynchronous driver (#381) +- deprecated `ArangoIterable` methods in favour of Java 8 Stream equivalents (#382) + +## [6.10.0] - 2021-03-27 + +- closing VST connection after 3 consecutive keepAlive failures (#ES-837) + +## [6.9.1] - 2021-03-23 + +- fixed `acquireHostList` in asynchronous driver (#377) +- fixed exception swallowing in `ArangoDatabaseAsync#exists` +- fixed performance issue when consuming big AQL cursor batches in stream mode (arangodb/arangodb#13476) + +## [6.9.0] - 2021-02-04 + +- added `com.arangodb.mapping.ArangoJack` to replace `com.arangodb.jackson.dataformat.velocypack.VelocyJack` (from + `com.arangodb:jackson-dataformat-velocypack`) + +- fixed removing removed coordinators from the hostlist (#347) + +## [6.8.2] - 2021-01-25 + +- fixed closing connection on failed authentication (#ES-772) + +## [6.8.1] - 2020-12-22 + +- fixed ignoring internal endpoints in acquireHostList (#DEVSUP-673) + +## [6.8.0] - 2020-12-10 + +- added configurable VST keep-alive + +## [6.7.5] - 2020-09-22 + +- allow customizing httpRequestRetryHandler + +## [6.7.4] - 2020-09-03 + +- fixed path escaping in `ArangoDatabase.route()` +- added ssl hostname verifier to ArangoDB builder + +## [6.7.3] - 2020-08-14 + +- added `users` field to `DBCreateOptions` +- velocypack v2.4.1 + +## [6.7.2] - 2020-07-29 + +- velocypack v2.4.0 + +## [6.7.1] - 2020-07-07 + +- fixed VST communication adding `accept` and `content-type` headers to every message +- 
fixed missing classes in GraalVM native image reflection configuration + +## [6.7.0] - 2020-07-01 + +- added support of schema validation (ArangoDB v3.7) +- added support of `overwriteMode` on document creation, to allow `insert-ignore`, `insert-replace` and `insert-update` (ArangoDB v3.7) +- added support of `mergeObjects` for insert document with `overwriteMode: update` (ArangoDB v3.7) +- added support of `storedValues` in `ArangoSearchProperties` (ArangoDB v3.7) +- added support of `primarySortCompression` in `ArangoSearchProperties` (ArangoDB v3.7) +- added support of `DisjointSmartGraphs` and `SatelliteGraphs` (ArangoDB v3.7) +- added support of `SatelliteGraphs` support (ArangoDB v3.7) +- allow specifying return type on document update +- added `peakMemoryUsage` to aql statistics + +## [6.7.0_PREVIEW_3.7.1-alpha.1] - 2020-05-22 + +- added support of `DisjointSmartGraphs` and `SatelliteGraphs` (ArangoDB v3.7) +- added support of `storedValues` in `ArangoSearchProperties` (ArangoDB v3.7) +- added support of `primarySortCompression` in `ArangoSearchProperties` (ArangoDB v3.7) +- added support of `overwriteMode` on document creation, to allow `insert-ignore`, `insert-replace` and `insert-update` (ArangoDB v3.7) +- added support of `mergeObjects` for insert document with `overwriteMode: update` (ArangoDB v3.7) +- velocypack v2.3.1 + +## [6.6.3] - 2020-05-06 + +- velocypack v2.3.1 + +## [6.6.2] - 2020-04-07 + +- bugfix VelocyJack deserialization +- bugfix `allowImplicit` parameter in stream transactions + +## [6.7.0_PREVIEW_3.7.0-alpha.2_0] - 2020-03-24 + +- added `overwriteMode` parameter to support insert-update (ArangoDB v3.7) +- satellite graphs support (ArangoDB v3.7) +- schema validation (ArangoDB v3.7) +- added `peakMemoryUsage` to aql statistics + +## [6.6.1] - 2020-03-18 + +- GraalVM Native Image support +- fixed acquire host list (ArangoDB v3.7) + +## [6.6.0] - 2020-02-03 + +- typed ArangoSearch analyzers +- updated dependencies +- bugfix asynchronous 
shutdown + +## [6.5.0] - 2019-12-23 + +- createDatabase with options (replicationFactor, minReplicationFactor, sharding) (ArangoDB v3.6) +- extended DatabaseEntity with replicationFactor, minReplicationFactor, sharding (ArangoDB v3.6) +- timeout option for AQL queries (ArangoDB v3.6) +- enhancedNgramAnalyzer and enhancedTextAnalyzer (ArangoDB v3.6) +- velocypack v2.1.0 + +## [6.4.1] - 2019-10-23 + +- jackson v2.9.10 + +## [6.4.0] - 2019-10-09 + +### Added + +- Stream Transactions support for graph APIs + +### Fixed + +- `catchExceptions` option in async `getEdge` and `getVertex` + +## [6.3.0] - 2019-09-16 + +### Added + +- support for keyType uuid & padded + +### Fixed + +- bugfix AqlExecutionExplainEntity indexes +- bugfix reconnection after more than 3 failures + +## [6.2.0] - 2019-09-05 + +- merged async driver +- bugfix method chaining in IndexOptions + +## [6.1.0] - 2019-08-29 + +### Added + +- updated maven dependencies + +### Fixed + +- custom serde not always used +- `documentExists()` and `getDocument` behaviour on non existing `transactionId` + +## [6.0.0] - 2019-08-20 + +### Added + +- split `GraphDocumentReadOptions` from `DocumentReadOptions` (breaking change) +- added `ArangoCollection#getResponsibleShard(Object)` +- added support for Analyzers +- added support for Stream Transactions +- added support for named indices +- added support for TTL indices +- added minReplicationAttribute for collections and graphs + +## [5.0.7] - 2019-07-19 + +### Fixed + +- properly load all configuration defaults + +### Added + +- added acquireHostListInterval configuration parameter + +## [5.0.6] - 2019-05-24 + +### Added + +- requests are now storing header information +- faster test code execution + +## [5.0.5] - 2019-05-24 + +### Fixed + +- host handling (issue #241) +- logging extended hostresolver + +### Added + +- add arangodb.httpCookieSpec +- added smartJoinAttribute and shardingStrategy collection attributes + +## [5.0.4] - 2019-01-18 + +### Fixed + +- 
fixed bug with multi document operations when using parameter `silent` (issue #241) + +## [5.0.3] - 2018-11-12 + +### Fixed + +- adaption to changed ArangoSearch API + +## [5.0.2] - 2018-11-09 + +### Added + +- added `ArangoGraph#drop(boolean dropCollections)` + +### Changed + +- changed `ArangoDB#timeout` to also set the request timeout when using VelocyStream (issue #230) + +### Fixed + +- fixed compatibility of `ArangoCursor#filter` with Java 6 +- fixed replace-insert with `DocumentCreateOptions#overwrite(Boolean)` for `ArangoCollection#insertDocuments` +- removed unused dependency + +## [5.0.1] - 2018-09-25 + +### Fixed + +- fixed `ArangoCursor#next` when performing a dirty read +- fixed connection stickiness + +## [5.0.0] - 2018-09-18 + +### Added + +- added dirty read support ([reading from followers](https://docs.arangodb.com/stable/deploy/active-failover/administration/#reading-from-follower)) + + - added option `AqlQueryOptions#allowDirtyRead` for `ArangoDatabase#query`. + - added option `DocumentReadOptions#allowDirtyRead` for `ArangoCollection#getDocument` + - added option `DocumentReadOptions#allowDirtyRead` for `ArangoCollection#getDocuments` + - added option `DocumentReadOptions#allowDirtyRead` for `ArangoVertexCollection#getVertex` + - added option `DocumentReadOptions#allowDirtyRead` for `ArangoEdgeCollection#getEdge` + +### Changed + +- changed the internal connection pool and host management. There now exists a connection pool for every configured host. This changes the behavior of `ArangoDB.Builder#maxConnections` which now allows to configure the maximal number of connection per host and not overall. +- changed `IndexEntity#selectivityEstimate` from `Integer` to `Double` +- upgraded dependency velocypack 1.4.1 + + - added support for generic types + + Serialize the class name in a field \_class when necessary. 
Field name can be configured through VPack.Builder#typeKey(String) + +## [4.7.3] - 2018-09-03 + +### Changed + +- made `AqlQueryOptions#Options` serializable + +## [4.7.2] - 2018-09-03 + +### Changed + +- made `AqlQueryOptions` serializable + +## [4.7.1] - 2018-09-03 + +### Fixed + +- applied arangosearch API changes for ArangoDB 3.4.0 +- fixed `ArangoCursor#close()`: check hasNext before close (issue #223) + +## [4.7.0] - 2018-08-02 + +### Added + +- added View support + - added `ArangoDatabase#view(String): ArangoView` + - added `ArangoDatabase#getViews(): Collection` + - added `ArangoView` +- added arangosearch support + - added `ArangoDatabase#arangoSearch(String): ArangoSearch` + - added `ArangoSearch` +- added `ArangoCursor#first()` +- added `java.util.stream.Stream` like methods for `ArangoCursor` + - added `ArangoCursor#foreach(Consumer)` + - added `ArangoCursor#map(Function)` + - added `ArangoCursor#filter(Predicate)` + - added `ArangoCursor#anyMatch(Predicate)` + - added `ArangoCursor#allMatch(Predicate)` + - added `ArangoCursor#noneMatch(Predicate)` + - added `ArangoCursor#collectInto(Collection)` +- added interface `Entity` for entities in `com.arangodb.entity` + +### Changed + +- upgraded dependency velocypack 1.3.0 + - `VPackDeserializationContext#deserialize(VPackSlice, Class)` to `VPackDeserializationContext#deserialize(VPackSlice, java.lang.reflect.Type)` + +## [4.6.1] - 2018-07-12 + +### Added + +- added convenience method `ArangoDatabase#query(String, Class)` +- added convenience method `ArangoDatabase#query(String, Map, Class)` +- added convenience method `ArangoDatabase#query(String, AqlQueryOptions, Class)` + +### Fixed + +- fixed `ArangoCollection#rename(String)` + + Change field `name` in `ArangoCollection` after rename so that future requests through the instance will be made with the new collection name. 
+ +- fixed missing `ArangoDatabase.util() : ArangoSerialization` +- fixed missing `ArangoCollection.util() : ArangoSerialization` +- fixed missing `ArangoGraph.util() : ArangoSerialization` +- fixed missing `ArangoVertexCollection.util() : ArangoSerialization` +- fixed missing `ArangoEdgeCollection.util() : ArangoSerialization` + +## [4.6.0] - 2018-07-02 + +### Added + +- added convenience methods for arbitrary requests + - added `ArangoDatabase.route(String...)` +- added `DocumentCreateOptions#silent(Boolean)` +- added `DocumentReplaceOptions#silent(Boolean)` +- added `DocumentUpdateOptions#silent(Boolean)` +- added `DocumentDeleteOptions#silent(Boolean)` +- added support for exclusive write operations (issue #190) + - added `TransactionOptions#exclusiveCollections(String[])` + +### Removed + +- removed unnecessary deserializer for internal `_id` field + +### Fixed + +- fixed serializing of documents/edges: use custom serializer + +## [4.5.2] - 2018-06-25 + +### Added + +- added support for custom serializer + - added `ArangoDB.Builder#serializer(ArangoSerialization)` + - added link to jackson-dataformat-velocypack in docs + +## [4.5.1] - 2018-06-21 + +### Fixed + +- fixed `exists()` method in `ArangoDatabase`, `ArangoCollection`, `ArangoGraph`: check for ArangoDB error num +- fixed `ArangoDB#aquireHostList(true)` with authentication + +## [4.5.0] - 2018-06-11 + +### Added + +- added replace-insert support: `DocumentCreateOptions#overwrite(Boolean)` +- added support for satellite collections: `CollectionCreateOptions#satellite(Boolean)` +- added `AqlQueryOptions#stream(boolean)` for Streaming AQL Cursors +- added `ArangoDatabase#create()` +- added `ArangoCollection#create()` +- added `ArangoCollection#create(CollectionCreateOptions)` +- added `ArangoGraph#create(Collection)` +- added `ArangoGraph#create(Collection, GraphCreateOptions)` +- added return type for `ArangoDatabase#deleteAqlFunction()` +- added field `AqlFunctionEntity#isDeterministic` + +### Changed + 
+- upgraded dependency velocypack 1.2.0 + - replaced dependency json-simple with jackson +- extracted interfaces for ArangoDB API + +### Removed + +- removed deprecated `ArangoDB.Builder#host(String)` +- removed deprecated `ArangoDB.Builder#port(Integer)` +- removed deprecated `ArangoCollection#create[IndexType]Index()` +- removed deprecated `ArangoDatabase#updateUserDefaultCollectionAccess()` +- removed deprecated `ArangoDB#updateUserDefaultDatabaseAccess()` +- removed deprecated `ArangoDB#updateUserDefaultCollectionAccess()` +- removed several deprecated APIs + +### Fixed + +- fixed `aquireHostList` bug when using active failover + +## [4.4.1] - 2018-06-04 + +### Fixed + +- fixed concurrency bug in VST when using connectionTtl + +## [4.4.0] - 2018-04-19 + +### Changed + +- changed dependency com.arangodb:velocypack to 1.1.0 + - fixed DateUtil does incorrect conversion of UTC time + - serialize `BigInteger`/`BigDecimal` as `String` + +### Fixed + +- fixed reconnecting after ArangoDB restarts (issue #186) +- fixed `ArangoCollection#updateDocuments()` ignoring `DocumentUpdateOptions#serializeNull` (issue #180) + +## [4.3.7] - 2018-04-17 + +### Fixed + +- fixed property loading + +## [4.3.6] - 2018-04-16 + +### Added + +- added `ArangoDB.Builder#maxConnectionTtl(Integer)` (Issue #141, #186) + +## [4.3.5] - 2018-04-11 + +### Fixed + +- fixed compatibility for `ArangoDatabase#getAqlFunctions()` for ArangoDB 3.4 +- fixed internal exception handling in VST connection + +## [4.3.4] - 2018-03-21 + +### Changed + +- made `ErrorEntity` serializable (Issue #178) + +### Fixed + +- fixed serialization of bind parameter with null values (Issue #176, #177) +- fixed VelocyStream multi-thread authentication bug +- fixed load balancing cursor stickiness bug + +## [4.3.3] - 2018-02-01 + +### Added + +- added `CollectionCreateOptions#distributeShardsLike(String)` (Issue #170) +- added `AqlQueryOptions#memoryLimit(Long)` +- added `AqlQueryOptions#failOnWarning(Boolean)` +- added 
`AqlQueryOptions#maxTransactionSize(Long)` +- added `AqlQueryOptions#maxWarningCount(Long)` +- added `AqlQueryOptions#intermediateCommitCount(Long)` +- added `AqlQueryOptions#intermediateCommitSize(Long)` +- added `AqlQueryOptions#satelliteSyncWait(Double)` +- added `AqlQueryOptions#skipInaccessibleCollections(Boolean)` +- added `TransactionOptions#maxTransactionSize(Long)` +- added `TransactionOptions#intermediateCommitCount(Long)` +- added `TransactionOptions#intermediateCommitSize(Long)` +- added `QueryEntity#getBindVars(): Map` +- added `QueryEntity#getState(): QueryExecutionState` + +### Fixed + +- fixed inconsistency of `ArangoCollection#getDocument()` variants (Issue #168) + +## [4.3.2] - 2017-11-30 + +### Fixed + +- fixed redirect header (uppercase) + +## [4.3.1] - 2017-11-27 + +### Fixed + +- fixed default JSON parsing, include null values (Issue #163) +- fixed JSON parsing of negative long (Issue #151) + +## [4.3.0] - 2017-11-23 + +### Added + +- added load balancing (`ArangoDB.Builder#loadBalancingStrategy()`) +- added automatic acquiring of hosts for load balancing or as fallback (`ArangoDB.Builder#acquireHostList()`) + +## [4.2.7] - 2017-11-03 + +### Added + +- added `ArangoGraph#exists()` + +### Fixed + +- fixed deserialization of `BigDecimal` + +## [4.2.6] - 2017-10-23 + +### Changed + +- exclude junit dependency of json-simple + +### Fixed + +- fixed de-/serialization of negative int values (issue #151) + +## [4.2.5] - 2017-10-16 + +### Added + +- added `ArangoCollection#exists()` (issue #146) +- added `ArangoDatabase#exists()` +- added `BaseDocument#setId(String)` (issue #152) +- added `GraphCreateOptions#replicationFactor(Integer)` + +### Changed + +- `ArangoDB#shutdown()` now closes all connections (issue #156) + +## [4.2.4] - 2017-09-04 + +### Added + +- added properties validation `arangodb.host` +- added `ArangoCollection#ensureIndex()` + +### Changed + +- let `ArangoCursor` implement `Iterable` + +### Deprecated + +- deprecated 
`ArangoCollection#createIndex()` + +### Fixed + +- fixed `ArangoDatabase#transaction()`: ignore null result +- fixed `ArangoCollection#updateDocument()` (issue #145) +- fixed `ArangoVertexCollection#updateVertex()` (issue #145) +- fixed `ArangoEdgeCollection#updateEdge()` (issue #145) + +## [4.2.3] - 2017-07-31 + +### Added + +- added `ArangoDatabase#getPermissions(String)` +- added `ArangoCollection#getPermissions(String)` +- added `ArangoDB#grantDefaultDatabaseAccess(String, Permissions)` +- added `ArangoDB#grantDefaultCollectionAccess(String, Permissions)` +- added `ArangoDatabase#grantDefaultCollectionAccess(String, Permissions)` + +### Fixed + +- fixed `DateUtil` (thread-safe) + +## [4.2.2] - 2017-07-20 + +### Added + +- added `ArangoDatabase#grantAccess(String, Permissions)` +- added `ArangoCollection#grantAccess(String, Permissions)` +- added `ArangoDatabase#resetAccess(String)` +- added `ArangoCollection#resetAccess(String)` +- added `ArangoDB#updateUserDefaultDatabaseAccess(String, Permissions)` +- added `ArangoDB#updateUserDefaultCollectionAccess(String, Permissions)` +- added `ArangoDatabase#updateUserDefaultCollectionAccess(String, Permissions)` +- added `ArangoCollection#getDocuments(Collection, Class)` +- added connection/handshake retry on same host +- added deduplicate field for hash/skiplist index + +## [4.2.1] - 2017-06-20 + +### Fixed + +- fixed deserializing of internal field `_id` + +## [4.2.0] - 2017-06-14 + +### Added + +- added `ArangoDBVersion#getLicense()` +- added `ArangoDB#getRole()` +- added `ArangoDBException#getException()` +- added protocol switch (`ArangoDB.Builder#protocol(Protocol)`) + - `Protocol#VST` = VeclocyStream (default) + - `Protocol#HTTP_JSON` = JSON over HTTP + - `Protocol#HTTP_VPACK` = VelocyPack over HTTP + +## [4.1.12] - 2017-04-13 + +### Added + +- added `ArangoDatabase#cursor()` (issue #116) + +### Changed + +- optimized `ArangoDB.Builder` for better multi thread support + +### Fixed + +- fixed `VPackSlice` 
`float`/`double` bug + +## [4.1.11] - 2017-03-24 + +### Added + +- added convenience methods `ArangoDatabase#arango()`, `ArangoCollection#db()`, `ArangoGraph#db()` +- added convenience methods `ArangoCollection#getIndex(String)`, `ArangoCollection#deleteIndex(key)` +- added connection pooling (issue #103) +- added extension point for `VelocyPack` serialization (`ArangoDB#registerModule()`) +- added support for replacing build-in VelocyPack serializer/deserializer +- added `ArangoDatabase#getVersion()`, `ArangoDatabase#getAccessibleDatabases()` + +### Changed + +- extracted VelocyPack implementation to https://github.com/arangodb/java-velocypack + +### Fixed + +- fixed exception handling in Connection (issue #110) +- fixed NPE in `ArangoCursor` (issue #112) + +## [4.1.10] - 2017-02-22 + +### Added + +- added support for multiple hosts as fallbacks +- added support serializing collections with null elements +- added support serializing non-generic classes that extend collections +- added support serializing/deserializing byte and Byte +- added default value "root" for user + +### Changed + +- changed velocystream message sending to async +- changed return value of getVertex/getEdge to null if not exists + +### Fixed + +- fixed serialization of additionalFields for objects and maps +- fixed VPack parsing (arrays of specific length) + +## [4.1.9] - 2017-02-10 + +### Added + +- added missing `IndexType#edge` + +### Fixed + +- fixed Connection (thread-safe) +- fixed URI encoding + +## [4.1.8] - 2017-02-03 + +### Added + +- added byte[] de-/serialization from/to `VPack.string` (Base64) +- added ArangoCollection.drop(isSystem) +- improved ArangoDBException with responseCode, errorNum, errorMessage + +### Changed + +- changed `java.util.Date` serialization from `VPack.date` to `VPack.string` (ISO 8601) +- changed `java.sql.Date` serialization from `VPack.date` to `VPack.string` (ISO 8601) +- changed `java.sql.Timestamp` serialization from `VPack.date` to `VPack.string` (ISO 
8601) +- changed `ArangoCollection#deleteDocuments()` to work with keys and documents + +### Fixed + +- fixed URL encoding bug (#97) +- fixed update/replaceDocumets with JSON (#98) + +## [4.1.7] - 2017-01-26 + +### Fixed + +- fixed `importDocuments`, `insertDocuments` to work with raw JSONs (issue #91) + +## [4.1.6] - 2017-01-18 + +### Added + +- added serializer support for enclosing types + +## [4.1.5] - 2017-01-12 + +### Added + +- added configuration for custom annotations within `VPack` de-/serialization +- added support of transient modifier within `VPack` de-/serialization + +### Fixed + +- fixed `VPack` String serialization (UTF-8 encoding) +- fixed `VPack` parsing of fields of type Object +- fixed `VPack` serializing of array with null values (issue #88) + +## [4.1.4] - 2016-12-19 + +### Added + +- added `VPack` serializer/de-serializer for `java.util.UUID` + +### Fixed + +- fixed `VPack` parsing (issue #65, #80, #82) + +## [4.1.3] - 2016-11-22 + +### Added + +- added bulk import API + +### Fixed + +- fixed error while serializing long values with VPackBuilder + +## [4.1.2] - 2016-11-10 + +### Added + +- added `VelocyPack` UTC_DATE parsing to JSON String (ISO 8601) +- added configuration methods for `VPackParser` in `ArangoDB.Builder` +- added `VPackJsonSerializer` for `VPackParser` + +### Fixed + +- fixed `GraphEntity` for `ArangoDatabase#getGraphs()` (field `name` is null) + +## [4.1.1] - 2016-11-09 + +### Added + +- added option `CollectionCreateOptions#replicationFactor` +- added option `CollectionPropertiesEntity#replicationFactor` +- added option `DocumentUpdateOptions#serializeNull` + +### Changed + +- changed json parsing of VelocyPack types not known in json + +### Fixed + +- fixed VelocyPack bug with non-ASCII characters + +## [4.1.0] - 2016-10-28 + +### Added + +- added `ArangoUtil` for manually de-/serialization + +### Changed + +- changed VelocyStream communication (send protocol header) + +## [4.0.0] - 2016-10-17 + +### Added + +- added 
VelocyPack support +- added multi document operations (insert, delete, update, replace) + +### Replaced + +- replaced API +- replaced protocol http with VelocyStream + +## [3.1.0] - 2016-10-17 + +### Added + +- added profile flag to AqlQueryOptions (issue #47) + +### Changed + +- changed Revision from long to String + +### Removed + +- removed methods with collectionId (long) from `ArangoDriver` (Id is only for internal usage) +- removed methods with documentId (long) from `ArangoDriver` + +## [3.0.4] - 2016-10-17 + +### Fixed + +- fixed edges deserializer (issue #50) + +## [3.0.3] - 2016-09-12 + +### Added + +- added error handling in getBatchResponseByRequestId() +- added function createPersistentIndex() (issue #48) +- added deserializer for BaseDocument (issue #50) + +## [3.0.2] - 2016-08-05 + +### Added + +- added profile flag to AqlQueryOptions (issue #47) +- added getExtra() to DocumentCursor<> (issue #47) +- added IndexType.PERSISTENT (issue #48) + +## [3.0.1] - 2016-07-08 + +### Added + +- added flag complete and details in ImportOptions + +### Fixed + +- fixed issue #43 (ArangoDriver.getAqlFunctions(String) does not uses the defaultDatabase setting) + +## [3.0.0] - 2016-06-17 + +### Added + +- added User-Method grantDatabaseAccess(username, database) +- added Transaction attribute allowImplicit + +### Changed + +- refactored QueryCachePropertiesEntity, TransactionResultEntity + +### Replaced + +- replaced Graph-Functions (graph_edge, graph_vertices, graph_shortes_path) with AQL + +### Removed + +- removed ArangoDriver.EdgeEntity() (/\_api/edge withdrawn in Server) +- removed CAP-Index (Cap-constraints are withdrawn in Server) +- removed Param database in User-Methods (in 3.0 users are managed in \_users Collection in \_system Database only) +- removed deprecated Methods + +## [2.7.4] - 2016-04-15 + +### Fixed + +- fixed issue #35 (There is no replaceDocumentRaw similar to createDocumentRaw) + +## [2.7.3] - 2016-03-25 + +### Fixed + +- batch driver 
performance fix +- fixed issue #33 (typo in ArangoDriver.executeAqlQueryWithDocumentCursorResutl method) + +## [2.7.2] - 2016-01-22 + +### Added + +- added executeAqlQueryRaw(...). Example src/test/java/com/arangodb/example/document/RawDocumentExample.java + +## [2.7.1] - 2016-01-21 + +### Added + +- added examples for new AQL traversal functions (since ArangoDB 2.8) +- added AQL warnings to CursorResult (hasWarning() and getWarnings()) +- added createDocumentRaw(...) and getDocumentRaw(...). Examples src/test/java/com/arangodb/example/document/RawDocumentExample.java + +### Changed + +- Updated dependencies gson (2.5), httpclient (4.5.1) and slf4j-api (1.7.13) + +## [2.7.0] - 2015-11-20 + +### Added + +- added document examples in src/test/java/com/arangodb/example/document/ +- added graph examples in src/test/java/com/arangodb/example/document/ +- added new function executeAqlQueryJSON(): Executes an AQL query and returns the raw JSON response as a String +- initial support of HTTPS connections. 
Examples src/test/java/com/arangodb/example/ssl/ + +## [2.6.9] - 2015-10-16 + +### Added + +- added support API: `/_api/query-cache/properties` (AQL query tracking properties: setQueryTrackingProperties(), getQueryTrackingProperties()) +- added support API: `/_api/query-cache` (delete AQL query cache: deleteQueryCache()) +- added support API: `/_api/query/current` (currently running AQL queries: getCurrentlyRunningQueries()) +- added support API: `/_api/query/slow` (slow AQL queries: getSlowQueries(), deleteSlowQueries()) +- added support API: `/_api/query` (kill AQL queries: killQuery()) +- added boolean exists(long collectionId, long documentId) +- added boolean exists(String collectionName, long documentId) +- added boolean exists(long collectionId, String documentKey) throws ArangoException { +- added boolean exists(String collectionName, String documentKey) +- added boolean exists(String documentHandle) + +## [2.6.8] - 2015-09-25 + +### Fixed + +- fixed GRAPH_EDGES() 2.6-incompatibility + +## [2.5.6] - 2015-07-04 + +### Added + +- ArangoDB 2.6 support + +### Fixed + +- fixed issue #19 \* createEdge takes graphName but needs database name + +## [2.5.5] - 2015-05-23 + +### Added + +- added an examples for Transaction API + (see src/test/java/com/arangodb/example/TransactionExample.java) +- added TraversalQueryOptions to avoid too many parameters + +### Changed + +- updated `driver.getTraversal(...);` +- changed TransactionEntity.ReadWriteCollections to a static class (issue #17) + +### Removed + +- removed VisitedEntity (Traversal) + +## [2.5.4] - 2015-05-03 + +### Added + +- added new cursor implementation for AQL queries + - DocumentCursor executeDocumentQuery(...) + - VertexCursor executeVertexQuery(...) + - EdgeCursor executeEdgeQuery(...) +- added new cursor implementation for simple queries + - DocumentCursor executeSimpleAllDocuments(...) + - DocumentCursor executeSimpleByExampleDocuments(...) + - DocumentCursor executeSimpleRangeWithDocuments(...) 
+ - DocumentCursor executeSimpleFulltextWithDocuments(...) +- added some examples for AQL queries + (see src/test/java/com/arangodb/example) + +### Fixed + +- fixed issue #12 + - added auto reconnection when connection breaks + - added fallback server endpoints + +## [2.5.3] - 2015-03-29 + +### Fixed + +- fixed issue #9 \* added method to driver.getTraversal(...); + +## [2.5.0] + +### Added + +Added support for sparse indexes + +## [2.4.4] + +### Fixed + +- fixed issue #5 + - added method to driver.createGraph(GraphEntity g); +- fixed issue #6 +- fixed issue #7 + +## [2.4.3] + +### Added + +- Some additional methods in GraphEntity: + - public EdgeDefinitionsEntity getEdgeDefinitionsEntity() + - public void setEdgeDefinitionsEntity(EdgeDefinitionsEntity edgeDefinitionsEntity) +- Some additional methods in EdgeDefinitionsEntity: + - public int getSize() + - public EdgeDefinitionEntity getEdgeDefinition(String collectionName) + +### Changed + +- GraphEntity has been changed, so that edge definitions are stored in an EdgeDefinitionsEntity. + +### Fixed + +- Fixed a graph bug: when retrieving a graph via the driver, "from" and "to" were emtpy. This is fixed now. + +## [2.4.2] + +### Fixed + +- Fixed issue#2 + +## [2.4.1] + +### Changed + +- httpclient version 4.3.6 + +## [1.4.1] - 2014-02-04 + +### Added + +- added support API: GET `/_api/database/user` (getDatabases) +- added debug property: enableCURLLogger +- added Annotation @DocumentKey and support user define document key. +- added document API support `_key`. + +### Changed + +- rename attribute: UserEntity#user -> UserEntity#username +- modify API: createDatabase(db) -> createDatabase(db, users...) + +### Fixed + +- Bug fixed: raise error if `_key` is not number. 
- Fixed OracleJDK build error. (#11)
`/_api/graph/{graph-name}` (deleteGraph) +- add support API: POST `/_api/graph/{graph-name}/vertex` (createVertex) +- add support API: GET `/_api/graph/{graph-name}/vertex/{document-key}` (getVertex) +- add support API: DELETE `/_api/graph/{graph-name}/vertex/{document-key}` (deleteVertex) +- add support API: PUT `/_api/graph/{graph-name}/vertex/{document-key}` (replaceVertex) +- add support API: PATCH `/_api/graph/{graph-name}/vertex/{document-key}` (updateVertex) +- add support API: POST `/_api/graph/{graph-name}/vertices` (getVertices) +- add support API: POST `/_api/graph/{graph-name}/vertices/{vertex-key}` (getVertices) +- add support API: POST `/_api/graph/{graph-name}/edge` (createEdge) +- add support API: GET `/_api/graph/{graph-name}/edge/{edge-key}` (getEdge) +- add support API: DELETE `/_api/graph/{graph-name}/edge/{edge-key}` (deleteEdge) +- add support API: PUT `/_api/graph/{graph-name}/edge/{edge-key}` (replaceEdge) +- add support API: POST `/_api/graph/{graph-name}/edges` (getEdges) +- add support API: POST `/_api/graph/{graph-name}/edges/{vertex-key}` (getEdges) +- add attribute: CollectionEntity#checksum +- add attribute: CollectionEntity#doCompact +- add attribute: CollectionEntity#keyOptions +- add attribute: CollectionEntity.Figures#(compactorsCount,compactorsFileSize,shapefilesCount,shapefilesFileSize,shapesCount,attributesCount) +- add doCompact to argument of createCollection + +### Changed + +- getDocuments was changed to return document-handle +- rename method: updateDocument -> replaceDocument +- rename method: partialUpdateDocument -> updateDocument +- changed the version API endpoint. (/\_admin/version -> /\_api/version) +- changed into createOptions to keyOptions of collection API +- refactoring deserialize of parameterized entity class. 
+- gson library upgrade to 2.2.4 + +### Removed + +- remove attribute: CollectionEntity#createOptions +- remove: getServerStatus +- remove: getConnectionStatistics + +#### Fixed + +- fixed ArangoUnixTime bug. + +## [1.2.2] - 2013-07-10 + +### Added + +- add support API: `/_api/explain` +- add support API: `/_api/collection/collection-name/revision` +- add support API: `/_api/index of fulltext` +- add support API: `/_api/simple/fulltext` +- add support API: `/_admin/modules/flush` +- add support API: `/_admin/routing/reload` +- add support API: User Management +- add support: Basic Authentication +- `/_api/simple/all` and `/_api/simple/by-example` returns DocumentEntity +- add support import API + +## [1.2.1] - 2013-07-02 + +### Added + +- Add support: load configure from property-file in classpath. +- Add configure: timeout, connectionTimeout, retryCount. + +### Changed + +- Change google-gson scope in pom.xml +- Change logback-classic in pom.xml + +### Remove + +- Remove configure: autoUnknownCollections. 
+- Remove README.JA + +## [1.2.0] - 2013-06-30 + +- Initial Release + + + +[unreleased]: https://github.com/arangodb/arangodb-java-driver/compare/v7.0.0...HEAD +[7.0.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.23.0..v7.0.0 +[6.23.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.22.0..v6.23.0 +[6.22.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.21.0..v6.22.0 +[6.21.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.20.0..v6.21.0 +[6.20.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.19.0..v6.20.0 +[6.19.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.18.0..v6.19.0 +[6.18.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.17.0..v6.18.0 +[6.17.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.16.1..v6.17.0 +[6.16.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.16.0..v6.16.1 +[6.16.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.15.0..v6.16.0 +[6.15.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.14.0..v6.15.0 +[6.14.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.13.0..v6.14.0 +[6.13.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.12.3..v6.13.0 +[6.12.3]: https://github.com/arangodb/arangodb-java-driver/compare/v6.12.2..v6.12.3 +[6.12.2]: https://github.com/arangodb/arangodb-java-driver/compare/v6.12.1..v6.12.2 +[6.12.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.12.0..v6.12.1 +[6.12.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.11.1..v6.12.0 +[6.11.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.11.0..v6.11.1 +[6.11.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.10.0..v6.11.0 +[6.10.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.9.1..v6.10.0 +[6.9.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.9.0..v6.9.1 +[6.9.0]: 
https://github.com/arangodb/arangodb-java-driver/compare/v6.8.2..v6.9.0 +[6.8.2]: https://github.com/arangodb/arangodb-java-driver/compare/v6.8.1..v6.8.2 +[6.8.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.8.0..v6.8.1 +[6.8.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.5..v6.8.0 +[6.7.5]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.4..v6.7.5 +[6.7.4]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.3..v6.7.4 +[6.7.3]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.2..v6.7.3 +[6.7.2]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.1..v6.7.2 +[6.7.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.7.0..v6.7.1 +[6.7.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.6.3..v6.7.0 +[6.6.3]: https://github.com/arangodb/arangodb-java-driver/compare/v6.6.2..v6.6.3 +[6.6.2]: https://github.com/arangodb/arangodb-java-driver/compare/v6.6.1..v6.6.2 +[6.6.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.6.0..v6.6.1 +[6.6.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.5.0..v6.6.0 +[6.5.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.4.1..v6.5.0 +[6.4.1]: https://github.com/arangodb/arangodb-java-driver/compare/v6.4.0..v6.4.1 +[6.4.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.3.0..v6.4.0 +[6.3.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.2.0..v6.3.0 +[6.2.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.1.0..v6.2.0 +[6.1.0]: https://github.com/arangodb/arangodb-java-driver/compare/v6.0.0..v6.1.0 +[6.0.0]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.7..v6.0.0 +[5.0.7]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.6...5.0.7 +[5.0.6]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.5...5.0.6 +[5.0.5]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.4...5.0.5 +[5.0.4]: 
https://github.com/arangodb/arangodb-java-driver/compare/5.0.3...5.0.4 +[5.0.3]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.2...5.0.3 +[5.0.2]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.1...5.0.2 +[5.0.1]: https://github.com/arangodb/arangodb-java-driver/compare/5.0.0...5.0.1 +[5.0.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.7.3...5.0.0 +[4.7.3]: https://github.com/arangodb/arangodb-java-driver/compare/4.7.2...4.7.3 +[4.7.2]: https://github.com/arangodb/arangodb-java-driver/compare/4.7.1...4.7.2 +[4.7.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.7.0...4.7.1 +[4.7.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.6.1...4.7.0 +[4.6.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.6.0...4.6.1 +[4.6.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.5.2...4.6.0 +[4.5.2]: https://github.com/arangodb/arangodb-java-driver/compare/4.5.1...4.5.2 +[4.5.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.5.0...4.5.1 +[4.5.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.4.1...4.5.0 +[4.4.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.4.0...4.4.1 +[4.4.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.7...4.4.0 +[4.3.7]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.6...4.3.7 +[4.3.6]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.5...4.3.6 +[4.3.5]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.4...4.3.5 +[4.3.4]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.3...4.3.4 +[4.3.3]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.2...4.3.3 +[4.3.2]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.1...4.3.2 +[4.3.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.3.0...4.3.1 +[4.3.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.7...4.3.0 +[4.2.7]: 
https://github.com/arangodb/arangodb-java-driver/compare/4.2.6...4.2.7 +[4.2.6]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.5...4.2.6 +[4.2.5]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.4...4.2.5 +[4.2.4]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.3...4.2.4 +[4.2.3]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.2...4.2.3 +[4.2.2]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.1...4.2.2 +[4.2.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.2.0...4.2.1 +[4.2.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.12...4.2.0 +[4.1.12]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.11...4.1.12 +[4.1.11]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.10...4.1.11 +[4.1.10]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.9...4.1.10 +[4.1.9]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.8...4.1.9 +[4.1.8]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.7...4.1.8 +[4.1.7]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.6...4.1.7 +[4.1.6]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.5...4.1.6 +[4.1.5]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.4...4.1.5 +[4.1.4]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.3...4.1.4 +[4.1.3]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.2...4.1.3 +[4.1.2]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.1...4.1.2 +[4.1.1]: https://github.com/arangodb/arangodb-java-driver/compare/4.1.0...4.1.1 +[4.1.0]: https://github.com/arangodb/arangodb-java-driver/compare/4.0.0...4.1.0 +[4.0.0]: https://github.com/arangodb/arangodb-java-driver/compare/3.1.0...4.0.0 +[3.1.0]: https://github.com/arangodb/arangodb-java-driver/compare/3.0.4...3.1.0 +[3.0.4]: https://github.com/arangodb/arangodb-java-driver/compare/3.0.3...3.0.4 +[3.0.3]: 
https://github.com/arangodb/arangodb-java-driver/compare/3.0.2...3.0.3 +[3.0.2]: https://github.com/arangodb/arangodb-java-driver/compare/3.0.1...3.0.2 +[3.0.1]: https://github.com/arangodb/arangodb-java-driver/compare/3.0.0...3.0.1 +[3.0.0]: https://github.com/arangodb/arangodb-java-driver/compare/2.7.4...3.0.0 +[2.7.4]: https://github.com/arangodb/arangodb-java-driver/compare/2.7.3...2.7.4 +[2.7.3]: https://github.com/arangodb/arangodb-java-driver/compare/2.7.2...2.7.3 +[2.7.2]: https://github.com/arangodb/arangodb-java-driver/compare/2.7.1...2.7.2 +[2.7.1]: https://github.com/arangodb/arangodb-java-driver/compare/2.7.0...2.7.1 +[2.7.0]: https://github.com/arangodb/arangodb-java-driver/compare/2.6.9...2.7.0 +[2.6.9]: https://github.com/arangodb/arangodb-java-driver/compare/2.6.8...2.6.9 +[2.6.8]: https://github.com/arangodb/arangodb-java-driver/compare/2.5.6...2.6.8 +[2.5.6]: https://github.com/arangodb/arangodb-java-driver/compare/2.5.5...2.5.6 +[2.5.5]: https://github.com/arangodb/arangodb-java-driver/compare/2.5.4...2.5.5 +[2.5.4]: https://github.com/arangodb/arangodb-java-driver/compare/2.5.3...2.5.4 +[2.5.3]: https://github.com/arangodb/arangodb-java-driver/compare/2.5.0...2.5.3 +[2.5.0]: https://github.com/arangodb/arangodb-java-driver/compare/2.4.4...2.5.0 +[2.4.4]: https://github.com/arangodb/arangodb-java-driver/compare/2.4.3...2.4.4 +[2.4.3]: https://github.com/arangodb/arangodb-java-driver/compare/2.4.2...2.4.3 +[2.4.2]: https://github.com/arangodb/arangodb-java-driver/compare/2.4.1...2.4.2 +[2.4.1]: https://github.com/arangodb/arangodb-java-driver/compare/1.4.1...2.4.1 +[1.4.1]: https://github.com/arangodb/arangodb-java-driver/compare/1.4.0...1.4.1 +[1.4.0]: https://github.com/arangodb/arangodb-java-driver/compare/1.2.2...1.4.0 +[1.2.2]: https://github.com/arangodb/arangodb-java-driver/compare/1.2.1...1.2.2 +[1.2.1]: https://github.com/arangodb/arangodb-java-driver/compare/1.2.0...1.2.1 + diff --git a/README.md b/README.md index 28c014d20..86ab53472 
100644 --- a/README.md +++ b/README.md @@ -1,794 +1,15 @@ - -![ArangoDB-Logo](https://docs.arangodb.com/assets/arangodb_logo_2016_inverted.png) - -# arangodb-java-driver - -[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.arangodb/arangodb-java-driver/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.arangodb/arangodb-java-driver) - -## Supported versions - - - - - - - - - -
arangodb-java-driverArangoDBnetwork protocolJava version
4.3.x3.0.0+VelocyStream, HTTP1.6+
4.2.x3.0.0+VelocyStream, HTTP1.6+
4.1.x3.1.0+VelocyStream1.6+
3.1.x3.1.0+HTTP1.6+
3.0.x3.0.xHTTP1.6+
2.7.42.7.x, 2.8.xHTTP1.6+
- -**Note**: VelocyStream is only supported in ArangoDB 3.1 and above. - -## Learn more -* [ArangoDB](https://www.arangodb.com/) -* [ChangeLog](ChangeLog) -* [Examples](src/test/java/com/arangodb/example) -* [Tutorial](https://www.arangodb.com/tutorials/tutorial-sync-java-driver/) -* [JavaDoc](http://arangodb.github.io/arangodb-java-driver/javadoc-4_3/index.html) -* [JavaDoc VelocyPack](http://arangodb.github.io/java-velocypack/javadoc-1_0/index.html) - -## Maven - -To add the driver to your project with maven, add the following code to your pom.xml -(please use a driver with a version number compatible to your ArangoDB server's version): - -ArangoDB 3.x.x -```XML - - - com.arangodb - arangodb-java-driver - 4.3.0 - - -``` - -If you want to test with a snapshot version (e.g. 4.3.0-SNAPSHOT), add the staging repository of oss.sonatype.org to your pom.xml: - -```XML - - - arangodb-snapshots - https://oss.sonatype.org/content/groups/staging - - -``` - -## Compile java driver - -``` -mvn clean install -DskipTests=true -Dgpg.skip=true -Dmaven.javadoc.skip=true -B -``` - -## Table of Contents - -* [Driver setup](#driver-setup) - * [Network protocol](#network-protocol) - * [SSL](#ssl) - * [Connection pooling](#connection-pooling) - * [Fallback hosts](#fallback-hosts) - * [Load Balancing](#load-balancing) - * [configure VelocyPack serialization](#configure-velocypack-serialization) - * [Java 8 types](#java-8-types) - * [Scala types](#scala-types) - * [Joda-Time](#joda-time) - * [custom serializer](#custom-serializer) -* [Manipulating databases](#manipulating-databases) - * [create database](#create-database) - * [drop database](#drop-database) -* [Manipulating collections](#manipulating-collections) - * [create collection](#create-collection) - * [drop collection](#drop-collection) - * [truncate collection](#truncate-collection) -* [Basic Document operations](#basic-document-operations) - * [insert document](#insert-document) - * [delete document](#delete-document) - * 
[update document](#update-document) - * [replace document](#replace-document) - * [read document as JavaBean](#read-document-as-javabean) - * [read document as VelocyPack](#read-document-as-velocypack) - * [read document as Json](#read-document-as-json) - * [read document by key](#read-document-by-key) - * [read document by id](#read-document-by-id) -* [Multi document operations](#multi-document-operations) - * [insert documents](#insert-documents) - * [delete documents](#delete-documents) - * [update documents](#update-documents) - * [replace documents](#replace-documents) -* [AQL](#aql) - * [executing an AQL statement](#executing-an-aql-statement) -* [Graphs](#graphs) - * [add graph](#add-graph) - * [delete graph](#delete-graph) - * [add vertex](#add-vertex) - * [add edge](#add-edge) -* [Foxx](#foxx) - * [call a service](#call-a-service) -* [User management](#user-management) - * [add user](#add-user) - * [delete user](#delete-user) - * [list users](#list-users) - * [grant user access](#grant-user-access) - * [revoke user access](#revoke-user-access) -* [Serialization](#serialization) - * [JavaBeans](#javabeans) - * [internal fields](#internal-fields) - * [serialized fieldnames](#serialized-fieldnames) - * [ignore fields](#ignore-fields) - * [custom serializer](#custom-serializer) - * [manually serialization](#manually-serialization) - - -# Driver setup -Setup with default configuration, this automatically loads a properties file arangodb.properties if exists in the classpath: - -``` Java - // this instance is thread-safe - ArangoDB arangoDB = new ArangoDB.Builder().build(); - -``` - - -The driver is configured with some default values: - - - - - - - - - - - -
property-keydescriptiondefault value
arangodb.hostsArangoDB hosts127.0.0.1:8529
arangodb.timeoutsocket connect timeout(millisecond)0
arangodb.userBasic Authentication User
arangodb.passwordBasic Authentication Password
arangodb.useSsluse SSL connectionfalse
arangodb.chunksizeVelocyStream Chunk content-size(bytes)30000
arangodb.connections.maxmax number of connections1 VST, 20 HTTP
arangodb.protocolused network protocolVST
- -To customize the configuration the parameters can be changed in the code... - -``` Java - ArangoDB arangoDB = new ArangoDB.Builder().host("192.168.182.50", 8888).build(); - -``` -... or with a custom properties file (my.properties) - -``` Java - InputStream in = MyClass.class.getResourceAsStream("my.properties"); - ArangoDB arangoDB = new ArangoDB.Builder().loadProperties(in).build(); - -``` - -Example for arangodb.properties: -``` Java - arangodb.hosts=127.0.0.1:8529,127.0.0.1:8529 - arangodb.user=root - arangodb.password= - -``` - -## Network protocol - -The drivers default used network protocol is the binary protocol VelocyStream which offers the best performance within the driver. To use HTTP, you have to set the configuration `useProtocol` to `Protocol.HTTP_JSON` for HTTP with Json content or `Protocol.HTTP_VPACK` for HTTP with [VelocyPack](https://github.com/arangodb/velocypack/blob/master/VelocyPack.md) content. - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().useProtocol(Protocol.VST).build(); - -``` - -In addition to set the configuration for HTTP you have to add the apache httpclient to your classpath. - -```XML - - org.apache.httpcomponents - httpclient - 4.5.1 - -``` - -**Note**: If you are using ArangoDB 3.0.x you have to set the protocol to `Protocol.HTTP_JSON` because it is the only one supported. - -## SSL - -To use SSL, you have to set the configuration `useSsl` to `true` and set a `SSLContext`. (see [example code](../src/test/java/com/arangodb/example/ssl/SslExample.java)) - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().useSsl(true).sslContext(sc).build(); - -``` - -## Connection Pooling - -The driver supports connection pooling for VelocyStream with a default of 1 and HTTP with a default of 20 maximum connections. To change this value use the method `maxConnections(Integer)` in `ArangoDB.Builder`. 
- -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().maxConnections(8).build(); - -``` - -## Fallback hosts - -The driver supports configuring multiple hosts. The first host is used to open a connection to. When this host is not reachable the next host from the list is used. To use this feature just call the method `host(String, int)` multiple times. - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().host("host1", 8529).host("host2", 8529).build(); - -``` - -Since version 4.3 the driver support acquiring a list of known hosts in a cluster setup or a single server setup with followers. For this the driver has to be able to successfully open a connection to at least one host to get the list of hosts. Then it can use this list when fallback is needed. To use this feature just pass `true` to the method `acquireHostList(boolean)`. - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().acquireHostList(true).build(); - -``` - -## Load Balancing - -Since version 4.3 the driver supports load balancing for cluster setups in two different ways. - -The first one is a round robin load balancing where the driver iterates through a list of known hosts and performs every request on a different host than the request before. This load balancing strategy only work when the maximun of connections is greater 1. - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN).maxConnections(8).build(); - -``` - -Just like the Fallback hosts feature the round robin load balancing strategy can use the `acquireHostList` configuration to acquire a list of all known hosts in the cluster. Do so only requires the manually configuration of only one host. Because this list is updated frequently it makes load balancing over the whole cluster very comfortable. 
- -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN).maxConnections(8).acquireHostList(true).build(); - -``` - -The second load balancing strategy allows to pick a random host from the configured or acquired list of hosts and sticks to that host as long as the connection is open. This strategy is useful for an application - using the driver - which provides a session management where each session has its own instance of `ArangoDB` build from a global configured list of hosts. In this case it could be wanted that every sessions sticks with all its requests to the same host but not all sessions should use the same host. This load balancing strategy also works together with `acquireHostList`. - - -``` Java - - ArangoDB arangoDB = new ArangoDB.Builder().loadBalancingStrategy(LoadBalancingStrategy.ONE_RANDOM).acquireHostList(true).build(); - -``` - -## configure VelocyPack serialization - -Since version `4.1.11` you can extend the VelocyPack serialization by registering additional `VPackModule`s on `ArangoDB.Builder`. 
- -### Java 8 types - -Added support for: -* java.time.Instant -* java.time.LocalDate -* java.time.LocalDateTime -* java.util.Optional; -* java.util.OptionalDouble; -* java.util.OptionalInt; -* java.util.OptionalLong; - -```XML - - - com.arangodb - velocypack-module-jdk8 - 1.0.2 - - -``` - -``` Java -ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJdk8Module()).build(); -``` - -### Scala types - -Added support for: -* scala.Option -* scala.collection.immutable.List -* scala.collection.immutable.Map - -```XML - - - com.arangodb - velocypack-module-scala - 1.0.1 - - -``` - -``` Scala -val arangoDB: ArangoDB = new ArangoDB.Builder().registerModule(new VPackScalaModule).build -``` - -### Joda-Time - -Added support for: -* org.joda.time.DateTime; -* org.joda.time.Instant; -* org.joda.time.LocalDate; -* org.joda.time.LocalDateTime; - -```XML - - - com.arangodb - velocypack-module-joda - 1.0.0 - - -``` - -``` Java -ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackJodaModule()).build(); -``` - -## custom serializer -``` Java - ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() { - @Override - public > void setup(final C context) { - context.registerDeserializer(MyObject.class, new VPackDeserializer() { - @Override - public MyObject deserialize(VPackSlice parent,VPackSlice vpack, - VPackDeserializationContext context) throws VPackException { - MyObject obj = new MyObject(); - obj.setName(vpack.get("name").getAsString()); - return obj; - } - }); - context.registerSerializer(MyObject.class, new VPackSerializer() { - @Override - public void serialize(VPackBuilder builder,String attribute,MyObject value, - VPackSerializationContext context) throws VPackException { - builder.add(attribute, ValueType.OBJECT); - builder.add("name", value.getName()); - builder.close(); - } - }); - } - }).build(); -``` - - -# Manipulating databases - -## create database -``` Java - // create database - 
arangoDB.createDatabase("myDatabase"); - -``` - -## drop database -``` Java - // drop database - arangoDB.db("myDatabase").drop(); - -``` - -# Manipulating collections - -## create collection -``` Java - // create collection - arangoDB.db("myDatabase").createCollection("myCollection", null); - -``` - -## drop collection -``` Java - // delete collection - arangoDB.db("myDatabase").collection("myCollection").drop(); - -``` - -## truncate collection -``` Java - arangoDB.db("myDatabase").collection("myCollection").truncate(); -``` - -# Basic Document operations - -Every document operations works with POJOs (e.g. MyObject), VelocyPack (VPackSlice) and Json (String). - -For the next examples we use a small object: - -``` Java - public class MyObject { - - private String key; - private String name; - private int age; - - public MyObject(String name, int age) { - this(); - this.name = name; - this.age = age; - } - - public MyObject() { - super(); - } - - /* - * + getter and setter - */ - - } -``` - -## insert document -``` Java - MyObject myObject = new MyObject("Homer", 38); - arangoDB.db("myDatabase").collection("myCollection").insertDocument(myObject); - -``` - -When creating a document, the attributes of the object will be stored as key-value pair -E.g. 
in the previous example the object was stored as follows: -``` properties - "name" : "Homer" - "age" : "38" -``` - -## delete document -``` Java - arangoDB.db("myDatabase").collection("myCollection").deleteDocument(myObject.getKey()); - -``` - -## update document -``` Java - arangoDB.db("myDatabase").collection("myCollection").updateDocument(myObject.getKey(), myUpdatedObject); - -``` - -## replace document -``` Java - arangoDB.db("myDatabase").collection("myCollection").replaceDocument(myObject.getKey(), myObject2); - -``` - -## read document as JavaBean -``` Java - MyObject document = arangoDB.db("myDatabase").collection("myCollection").getDocument(myObject.getKey(), MyObject.class); - document.getName(); - document.getAge(); - -``` - -## read document as VelocyPack -``` Java - VPackSlice document = arangoDB.db("myDatabase").collection("myCollection").getDocument(myObject.getKey(), VPackSlice.class); - document.get("name").getAsString(); - document.get("age").getAsInt(); - -``` - -## read document as Json -``` Java - String json = arangoDB.db("myDatabase").collection("myCollection").getDocument(myObject.getKey(), String.class); - -``` - -## read document by key -``` Java - arangoDB.db("myDatabase").collection("myCollection").getDocument("myKey", MyObject.class); - -``` - -## read document by id -``` Java - arangoDB.db("myDatabase").getDocument("myCollection/myKey", MyObject.class); - -``` - -# Multi Document operations - -## insert documents -``` Java - Collection documents = new ArrayList<>; - documents.add(myObject1); - documents.add(myObject2); - documents.add(myObject3); - arangoDB.db("myDatabase").collection("myCollection").insertDocuments(documents); - -``` - -## delete documents -``` Java - Collection keys = new ArrayList<>; - keys.add(myObject1.getKey()); - keys.add(myObject2.getKey()); - keys.add(myObject3.getKey()); - arangoDB.db("myDatabase").collection("myCollection").deleteDocuments(keys); - -``` - -## update documents -``` Java - Collection 
documents = new ArrayList<>; - documents.add(myObject1); - documents.add(myObject2); - documents.add(myObject3); - arangoDB.db("myDatabase").collection("myCollection").updateDocuments(documents); - -``` - -## replace documents -``` Java - Collection documents = new ArrayList<>; - documents.add(myObject1); - documents.add(myObject2); - documents.add(myObject3); - arangoDB.db("myDatabase").collection("myCollection").replaceDocuments(documents); - -``` - -# AQL - -## Executing an AQL statement - -Every AQL operations works with POJOs (e.g. MyObject), VelocyPack (VPackSlice) and Json (String). - -E.g. get all Simpsons aged 3 or older in ascending order: - -``` Java - arangoDB.createDatabase("myDatabase"); - ArangoDatabase db = arangoDB.db("myDatabase"); - - db.createCollection("myCollection"); - ArangoCollection collection = db.collection("myCollection"); - - collection.insertDocument(new MyObject("Homer", 38)); - collection.insertDocument(new MyObject("Marge", 36)); - collection.insertDocument(new MyObject("Bart", 10)); - collection.insertDocument(new MyObject("Lisa", 8)); - collection.insertDocument(new MyObject("Maggie", 2)); - - Map bindVars = new HashMap<>(); - bindVars.put("age", 3); - - ArangoCursor cursor = db.query(query, bindVars, null, MyObject.class); - - for(; cursor.hasNext;) { - MyObject obj = cursor.next(); - System.out.println(obj.getName()); - } -``` - -or return the AQL result as VelocyPack: - -``` Java - ArangoCursor cursor = db.query(query, bindVars, null, VPackSlice.class); - - for(; cursor.hasNext;) { - VPackSlice obj = cursor.next(); - System.out.println(obj.get("name").getAsString()); - } -``` - -**Note**: The parameter `type` in `query()` has to match the result of the query, otherwise you get an VPackParserException. E.g. you set `type` to `BaseDocument` or a POJO and the query result is an array or simple type, you get an VPackParserException caused by VPackValueTypeException: Expecting type OBJECT. 
- -# Graphs - -The driver supports the [graph api](https://docs.arangodb.com/HTTP/Gharial/index.html). - -Some of the basic graph operations are described in the following: - -## add graph -A graph consists of vertices and edges (stored in collections). Which collections are used within a graph is defined via edge definitions. A graph can contain more than one edge definition, at least one is needed. - -``` Java - Collection edgeDefinitions = new ArrayList<>(); - EdgeDefinition edgeDefinition = new EdgeDefinition(); - // define the edgeCollection to store the edges - edgeDefinition.collection("myEdgeCollection"); - // define a set of collections where an edge is going out... - edgeDefinition.from("myCollection1", "myCollection2"); - - // repeat this for the collections where an edge is going into - edgeDefinition.to("myCollection1", "myCollection3"); - - edgeDefinitions.add(edgeDefinition); - - // A graph can contain additional vertex collections, defined in the set of orphan collections - GraphCreateOptions options = new GraphCreateOptions(); - options.orphanCollections("myCollection4", "myCollection5"); - - // now it's possible to create a graph - arangoDB.db("myDatabase").createGraph("myGraph", edgeDefinitions, options); - -``` - -## delete graph - -A graph can be deleted by its name - -``` Java - arangoDB.db("myDatabase").graph("myGraph").drop(); -``` - -## add vertex - -Vertices are stored in the vertex collections defined above. 
- -``` Java - MyObject myObject1 = new MyObject("Homer", 38); - MyObject myObject2 = new MyObject("Marge", 36); - arangoDB.db("myDatabase").graph("myGraph").vertexCollection("collection1").insertVertex(myObject1, null); - arangoDB.db("myDatabase").graph("myGraph").vertexCollection("collection3").insertVertex(myObject2, null); - -``` - -## add edge - -Now an edge can be created to set a relation between vertices - -``` Java - arangoDB.db("myDatabase").graph("myGraph").edgeCollection("myEdgeCollection").insertEdge(myEdgeObject, null); - -``` - -# Foxx - -## call a service -``` Java - Request request = new Request("mydb", RequestType.GET, "/my/foxx/service") - Response response = arangoDB.execute(request); - -``` - -# User management - -If you are using [authentication] (https://docs.arangodb.com/Manual/GettingStarted/Authentication.html) you can manage users with the driver. - -## add user -``` Java - //username, password - arangoDB.createUser("myUser", "myPassword"); -``` - -## delete user -``` Java - arangoDB.deleteUser("myUser"); -``` - -## list users -``` Java - Collection users = arangoDB.getUsers(); - for(UserResult user : users) { - System.out.println(user.getUser()) - } -``` - -## grant user access -``` Java - arangoDB.db("myDatabase").grantAccess("myUser"); -```` - -## revoke user access -``` Java - arangoDB.db("myDatabase").revokeAccess("myUser"); -```` - -# Serialization - -## JavaBeans -The driver can serialize/deserialize JavaBeans. They need at least a constructor without parameter. - -``` Java - public class MyObject { - - private String name; - private Gender gender; - private int age; - - public MyObject() { - super(); - } - - } -``` - -## internal fields -To use Arango-internal fields (like _id, _key, _rev, _from, _to) in your JavaBeans, use the annotation `DocumentField`. 
- -``` Java - public class MyObject { - - @DocumentField(Type.KEY) - private String key; - - private String name; - private Gender gender; - private int age; - - public MyObject() { - super(); - } - - } -``` - -## serialized fieldnames -To use a different serialized name for a field, use the annotation `SerializedName`. - -``` Java - public class MyObject { - - @SerializedName("title") - private String name; - - private Gender gender; - private int age; - - public MyObject() { - super(); - } - - } -``` - -## ignore fields -To ignore fields at serialization/deserialization, use the annotation `Expose` - -``` Java - public class MyObject { - - @Expose - private String name; - @Expose(serialize = true, deserialize = false) - private Gender gender; - private int age; - - public MyObject() { - super(); - } - - } -``` - -## custom serializer -``` Java - ArangoDB arangoDB = new ArangoDB.Builder().registerModule(new VPackModule() { - @Override - public > void setup(final C context) { - context.registerDeserializer(MyObject.class, new VPackDeserializer() { - @Override - public MyObject deserialize(VPackSlice parent,VPackSlice vpack, - VPackDeserializationContext context) throws VPackException { - MyObject obj = new MyObject(); - obj.setName(vpack.get("name").getAsString()); - return obj; - } - }); - context.registerSerializer(MyObject.class, new VPackSerializer() { - @Override - public void serialize(VPackBuilder builder,String attribute,MyObject value, - VPackSerializationContext context) throws VPackException { - builder.add(attribute, ValueType.OBJECT); - builder.add("name", value.getName()); - builder.close(); - } - }); - } - }).build(); -``` - -## manually serialization -To de-/serialize from and to VelocyPack before or after a database call, use the `ArangoUtil` from the method `util()` in `ArangoDB`, `ArangoDatabase`, `ArangoCollection`, `ArangoGraph`, `ArangoEdgeCollection`or `ArangoVertexCollection`. 
- -``` Java - ArangoDB arangoDB = new ArangoDB.Builder(); - VPackSlice vpack = arangoDB.util().serialize(myObj); -``` - -``` Java - ArangoDB arangoDB = new ArangoDB.Builder(); - MyObject myObj = arangoDB.util().deserialize(vpack, MyObject.class); -``` +![ArangoDB-Logo](https://user-images.githubusercontent.com/3998723/207981337-79d49127-48fc-4c7c-9411-8a688edca1dd.png) + +# ArangoDB Java Driver + +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.arangodb/arangodb-java-driver/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.arangodb/arangodb-java-driver) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/arangodb/arangodb-java-driver/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/arangodb/arangodb-java-driver/tree/main) + +The official [ArangoDB](https://www.arangodb.com/) Java Driver. + +## Learn more + +- [ChangeLog](ChangeLog.md) +- [Examples](test-non-functional/src/test/java/example) +- [Documentation and Tutorial](https://docs.arangodb.com/stable/develop/drivers/java/) +- [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) diff --git a/core/pom.xml b/core/pom.xml new file mode 100644 index 000000000..346a5156f --- /dev/null +++ b/core/pom.xml @@ -0,0 +1,100 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + core + core + Core module for ArangoDB Java Driver + + + com.arangodb.core + + + + + org.slf4j + slf4j-api + compile + + + com.fasterxml.jackson.core + jackson-databind + compile + + + com.fasterxml.jackson.core + jackson-core + compile + + + com.fasterxml.jackson.core + jackson-annotations + compile + + + com.fasterxml.jackson.datatype + jackson-datatype-jakarta-jsonp + compile + + + com.google.code.findbugs + jsr305 + provided + + + + + + + com.google.code.maven-replacer-plugin + replacer + + + generate-sources + + replace + + + + + ${project.basedir}/src/main/java/com/arangodb/PackageVersion.java.in + 
${project.build.directory}/generated-sources/replacer/com/arangodb/PackageVersion.java + + + + @project.version@ + ${project.version} + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + generate-sources + + add-source + + + + ${project.build.directory}/generated-sources/replacer + + + + + + + + + \ No newline at end of file diff --git a/core/src/main/java/com/arangodb/ArangoCollection.java b/core/src/main/java/com/arangodb/ArangoCollection.java new file mode 100644 index 000000000..6990eebbe --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoCollection.java @@ -0,0 +1,946 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import com.arangodb.util.RawData; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; + +/** + * Interface for operations on ArangoDB collection level. 
+ * + * @author Mark Vollmary + * @author Heiko Kernbach + * @author Michele Rastelli + * @see Collection API Documentation + */ +@ThreadSafe +public interface ArangoCollection extends ArangoSerdeAccessor { + + /** + * The the handler of the database the collection is within + * + * @return database handler + */ + ArangoDatabase db(); + + /** + * The name of the collection + * + * @return collection name + */ + String name(); + + /** + * Creates a new document from the given document, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData} + * @return information about the document + * @see API + * Documentation + */ + DocumentCreateEntity insertDocument(Object value); + + /** + * Creates a new document from the given document, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the document + * @see API + * Documentation + */ + DocumentCreateEntity insertDocument(T value, DocumentCreateOptions options); + + /** + * Creates a new document from the given document, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param type Deserialization target type for the returned documents. 
+ * @return information about the document + * @see API + * Documentation + */ + DocumentCreateEntity insertDocument(T value, DocumentCreateOptions options, Class type); + + /** + * Creates new documents from the given documents, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param values Raw data representing a collection of documents + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> insertDocuments(RawData values); + + /** + * Creates new documents from the given documents, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param values Raw data representing a collection of documents + * @param options Additional options + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> insertDocuments( + RawData values, DocumentCreateOptions options); + + /** + * Creates new documents from the given documents, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param values A List of documents + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> insertDocuments(Iterable values); + + /** + * Creates new documents from the given documents, unless there is already a document with the _key given. If no + * _key is given, a new unique _key is generated automatically. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> insertDocuments( + Iterable values, DocumentCreateOptions options); + + /** + * Creates new documents from the given documents, unless there is already a document with the _key given. 
If no + * _key is given, a new unique _key is generated automatically. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param type Deserialization target type for the returned documents. + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> insertDocuments( + Iterable values, DocumentCreateOptions options, Class type); + + /** + * Bulk imports the given values into the collection. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @return information about the import + * @see API + * Documentation + */ + DocumentImportEntity importDocuments(Iterable values); + + /** + * Bulk imports the given values into the collection. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the import + * @see API + * Documentation + */ + DocumentImportEntity importDocuments(Iterable values, DocumentImportOptions options); + + /** + * Bulk imports the given values into the collection. + * + * @param values Raw data representing a collection of documents + * @return information about the import + * @see API + * Documentation + */ + DocumentImportEntity importDocuments(RawData values); + + /** + * Bulk imports the given values into the collection. + * + * @param values Raw data representing a collection of documents + * @param options Additional options, can be null + * @return information about the import + * @see API + * Documentation + */ + DocumentImportEntity importDocuments(RawData values, DocumentImportOptions options); + + /** + * Retrieves the document with the given {@code key} from the collection. 
+ * + * @param key The key of the document + * @param type The type of the document (POJO or {@link com.arangodb.util.RawData}) + * @return the document identified by the key + * @see API + * Documentation + */ + T getDocument(String key, Class type); + + /** + * Retrieves the document with the given {@code key} from the collection. + * + * @param key The key of the document + * @param type The type of the document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return the document identified by the key + * @see API + * Documentation + */ + T getDocument(String key, Class type, DocumentReadOptions options); + + /** + * Retrieves multiple documents with the given {@code _key} from the collection. + * + * @param keys The keys of the documents + * @param type The type of the documents (POJO or {@link com.arangodb.util.RawData}) + * @return the documents and possible errors + * @see API + * Documentation + */ + MultiDocumentEntity getDocuments(Iterable keys, Class type); + + /** + * Retrieves multiple documents with the given {@code _key} from the collection. 
+ * + * @param keys The keys of the documents + * @param type The type of the documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return the documents and possible errors + * @see API + * Documentation + */ + MultiDocumentEntity getDocuments(Iterable keys, Class type, DocumentReadOptions options); + + /** + * Replaces the document with {@code key} with the one in the body, provided there is such a document and no + * precondition is violated + * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @return information about the document + * @see + * API + * Documentation + */ + DocumentUpdateEntity replaceDocument(String key, Object value); + + /** + * Replaces the document with {@code key} with the one in the body, provided there is such a document and no + * precondition is violated + * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the document + * @see + * API + * Documentation + */ + DocumentUpdateEntity replaceDocument(String key, T value, DocumentReplaceOptions options); + + /** + * Replaces the document with {@code key} with the one in the body, provided there is such a document and no + * precondition is violated + * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param type Deserialization target type for the returned documents. 
+ * @return information about the document + * @see + * API + * Documentation + */ + DocumentUpdateEntity replaceDocument(String key, T value, DocumentReplaceOptions options, Class type); + + /** + * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are + * specified by the _key attributes in the documents in values. + * + * @param values Raw data representing a collection of documents + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> replaceDocuments(RawData values); + + /** + * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are + * specified by the _key attributes in the documents in values. + * + * @param values Raw data representing a collection of documents + * @param options Additional options + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> replaceDocuments( + RawData values, DocumentReplaceOptions options); + + /** + * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are + * specified by the _key attributes in the documents in values. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> replaceDocuments(Iterable values); + + /** + * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are + * specified by the _key attributes in the documents in values. 
+ * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> replaceDocuments( + Iterable values, DocumentReplaceOptions options); + + /** + * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are + * specified by the _key attributes in the documents in values. + * + * @param values A List of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param type Deserialization target type for the returned documents. + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> replaceDocuments( + Iterable values, DocumentReplaceOptions options, Class type); + + /** + * Partially updates the document identified by document-key. The value must contain a document with the attributes + * to patch (the patch document). All attributes from the patch document will be added to the existing document if + * they do not yet exist, and overwritten in the existing document if they do exist there. + * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @return information about the document + * @see API + * Documentation + */ + DocumentUpdateEntity updateDocument(String key, Object value); + + /** + * Partially updates the document identified by document-key. The value must contain a document with the attributes + * to patch (the patch document). All attributes from the patch document will be added to the existing document if + * they do not yet exist, and overwritten in the existing document if they do exist there. 
+ * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the document + * @see API + * Documentation + */ + DocumentUpdateEntity updateDocument(String key, T value, DocumentUpdateOptions options); + + /** + * Partially updates the document identified by document-key. The value must contain a document with the attributes + * to patch (the patch document). All attributes from the patch document will be added to the existing document if + * they do not yet exist, and overwritten in the existing document if they do exist there. + * + * @param key The key of the document + * @param value A representation of a single document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param returnType Type of the returned newDocument and/or oldDocument + * @return information about the document + * @see API + * Documentation + */ + DocumentUpdateEntity updateDocument(String key, Object value, DocumentUpdateOptions options, + Class returnType); + + /** + * Partially updates documents, the documents to update are specified by the _key attributes in the objects on + * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All + * attributes from the patch documents will be added to the existing documents if they do not yet exist, and + * overwritten in the existing documents if they do exist there. + * + * @param values Raw data representing a collection of documents + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> updateDocuments(RawData values); + + /** + * Partially updates documents, the documents to update are specified by the _key attributes in the objects on + * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). 
All + * attributes from the patch documents will be added to the existing documents if they do not yet exist, and + * overwritten in the existing documents if they do exist there. + * + * @param values Raw data representing a collection of documents + * @param options Additional options + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> updateDocuments( + RawData values, DocumentUpdateOptions options); + + /** + * Partially updates documents, the documents to update are specified by the _key attributes in the objects on + * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All + * attributes from the patch documents will be added to the existing documents if they do not yet exist, and + * overwritten in the existing documents if they do exist there. + * + * @param values A list of documents (POJO or {@link com.arangodb.util.RawData}) + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> updateDocuments(Iterable values); + + /** + * Partially updates documents, the documents to update are specified by the _key attributes in the objects on + * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All + * attributes from the patch documents will be added to the existing documents if they do not yet exist, and + * overwritten in the existing documents if they do exist there. + * + * @param values A list of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> updateDocuments( + Iterable values, DocumentUpdateOptions options); + + /** + * Partially updates documents, the documents to update are specified by the _key attributes in the objects on + * values. 
Vales must contain a list of document updates with the attributes to patch (the patch documents). All + * attributes from the patch documents will be added to the existing documents if they do not yet exist, and + * overwritten in the existing documents if they do exist there. + * + * @param values A list of documents (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options + * @param returnType Type of the returned newDocument and/or oldDocument + * @return information about the documents + * @see + * API + * Documentation + */ + MultiDocumentEntity> updateDocuments( + Iterable values, DocumentUpdateOptions options, Class returnType); + + /** + * Deletes the document with the given {@code key} from the collection. + * + * @param key The key of the document + * @return information about the document + * @see + * API + * Documentation + */ + DocumentDeleteEntity deleteDocument(String key); + + /** + * Deletes the document with the given {@code key} from the collection. + * + * @param key The key of the document + * @param options Additional options + * @return information about the document + * @see + * API + * Documentation + */ + DocumentDeleteEntity deleteDocument(String key, DocumentDeleteOptions options); + + /** + * Deletes the document with the given {@code key} from the collection. + * + * @param key The key of the document + * @param type Deserialization target type for the returned documents. + * @param options Additional options + * @return information about the document + * @see + * API + * Documentation + */ + DocumentDeleteEntity deleteDocument(String key, DocumentDeleteOptions options, Class type); + + /** + * Deletes multiple documents from the collection. 
+ * + * @param values Raw data representing the keys of the documents or the documents themselves + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> deleteDocuments(RawData values); + + /** + * Deletes multiple documents from the collection. + * + * @param values Raw data representing the keys of the documents or the documents themselves + * @param options Additional options + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> deleteDocuments( + RawData values, DocumentDeleteOptions options); + + /** + * Deletes multiple documents from the collection. + * + * @param values The keys of the documents or the documents themselves + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> deleteDocuments(Iterable values); + + /** + * Deletes multiple documents from the collection. + * + * @param values The keys of the documents or the documents themselves + * @param options Additional options + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> deleteDocuments( + Iterable values, DocumentDeleteOptions options); + + /** + * Deletes multiple documents from the collection. + * + * @param values The keys of the documents or the documents themselves + * @param type Deserialization target type for the returned documents. 
+ * @param options Additional options + * @return information about the documents + * @see API + * Documentation + */ + MultiDocumentEntity> deleteDocuments( + Iterable values, DocumentDeleteOptions options, Class type); + + /** + * Checks if the document exists by reading a single document head + * + * @param key The key of the document + * @return true if the document was found, otherwise false + * @see API + * Documentation + */ + Boolean documentExists(String key); + + /** + * Checks if the document exists by reading a single document head + * + * @param key The key of the document + * @param options Additional options, can be null + * @return true if the document was found, otherwise false + * @see API + * Documentation + */ + Boolean documentExists(String key, DocumentExistsOptions options); + + /** + * Fetches information about the index with the given {@code id} and returns it. + *
+ * Note: inverted indexes are not returned by this method. Use + * {@link ArangoCollection#getInvertedIndex(String)} instead. + * + * @param id The index-handle + * @return information about the index + * @see + * API Documentation + */ + IndexEntity getIndex(String id); + + /** + * Fetches information about the inverted index with the given {@code id} and returns it. + * + * @param id The index-handle + * @return information about the index + * @see API Documentation + * @since ArangoDB 3.10 + */ + InvertedIndexEntity getInvertedIndex(String id); + + /** + * Deletes the index with the given {@code id} from the collection. + * + * @param id The index-handle + * @return the id of the index + * @see + * API Documentation + */ + String deleteIndex(String id); + + /** + * Creates a persistent index for the collection, if it does not already exist. + * + * @param fields A list of attribute paths + * @param options Additional options, can be null + * @return information about the index + * @see API + * Documentation + */ + IndexEntity ensurePersistentIndex(Iterable fields, PersistentIndexOptions options); + + /** + * Creates a geo-spatial index for the collection, if it does not already exist. + * + * @param fields A list of attribute paths + * @param options Additional options, can be null + * @return information about the index + * @see API + * Documentation + */ + IndexEntity ensureGeoIndex(Iterable fields, GeoIndexOptions options); + + /** + * Creates a fulltext index for the collection, if it does not already exist. + * + * @param fields A list of attribute paths + * @param options Additional options, can be null + * @return information about the index + * @see API + * Documentation + * @deprecated since ArangoDB 3.10, use ArangoSearch or Inverted indexes instead. + */ + @Deprecated + IndexEntity ensureFulltextIndex(Iterable fields, FulltextIndexOptions options); + + /** + * Creates a ttl index for the collection, if it does not already exist. 
+ * + * @param fields A list of attribute paths + * @param options Additional options, can be null + * @return information about the index + * @see API + * Documentation + */ + IndexEntity ensureTtlIndex(Iterable fields, TtlIndexOptions options); + + /** + * Creates a ZKD multi-dimensional index for the collection, if it does not already exist. + * Note that zkd indexes are an experimental feature in ArangoDB 3.9. + * + * @param fields A list of attribute paths + * @param options Additional options, can be null + * @return information about the index + * @see API Documentation + * @since ArangoDB 3.9 + * @deprecated since ArangoDB 3.12, use {@link #ensureMDIndex(Iterable, MDIndexOptions)} or + * {@link #ensureMDPrefixedIndex(Iterable, MDPrefixedIndexOptions)} instead. + */ + @Deprecated + IndexEntity ensureZKDIndex(Iterable fields, ZKDIndexOptions options); + + /** + * Creates a multi-dimensional index for the collection, if it does not already exist. + * + * @param fields A list of attribute names used for each dimension + * @param options Additional options, can be null. + * + * @return information about the index + * @see API Documentation + * @since ArangoDB 3.12 + */ + IndexEntity ensureMDIndex(Iterable fields, MDIndexOptions options); + + /** + * Creates a multi-dimensional prefixed index for the collection, if it does not already exist. + * + * @param fields A list of attribute names used for each dimension + * @param options Additional options, cannot be null. + * + * @return information about the index + * @see API Documentation + * @since ArangoDB 3.12 + */ + IndexEntity ensureMDPrefixedIndex(Iterable fields, MDPrefixedIndexOptions options); + + /** + * Creates an inverted index for the collection, if it does not already exist. 
+ * + * @param options index creation options + * @return information about the index + * @see API Documentation + * @since ArangoDB 3.10 + */ + InvertedIndexEntity ensureInvertedIndex(InvertedIndexOptions options); + + /** + * Fetches a list of all indexes on this collection. + *
+ * Note: inverted indexes are not returned by this method. Use + * {@link ArangoCollection#getInvertedIndexes()} instead. + * + * @return information about the indexes + * @see API + * Documentation + */ + Collection getIndexes(); + + /** + * Fetches a list of all inverted indexes on this collection. + * + * @return information about the indexes + * @see API + * Documentation + * @since ArangoDB 3.10 + */ + Collection getInvertedIndexes(); + + /** + * Checks whether the collection exists + * + * @return true if the collection exists, otherwise false + * @see API + * Documentation + */ + boolean exists(); + + /** + * Removes all documents from the collection, but leaves the indexes intact + * + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity truncate(); + + /** + * Removes all documents from the collection, but leaves the indexes intact + * + * @param options + * @return information about the collection + * @see API + * Documentation + * @since ArangoDB 3.5.0 + */ + CollectionEntity truncate(CollectionTruncateOptions options); + + /** + * Counts the documents in a collection + * + * @return information about the collection, including the number of documents + * @see API + * Documentation + */ + CollectionPropertiesEntity count(); + + /** + * Counts the documents in a collection + * + * @param options + * @return information about the collection, including the number of documents + * @see API + * Documentation + * @since ArangoDB 3.5.0 + */ + CollectionPropertiesEntity count(CollectionCountOptions options); + + /** + * Creates a collection for this collection's name, then returns collection information from the server. + * + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity create(); + + /** + * Creates a collection with the given {@code options} for this collection's name, then returns collection + * information from the server. 
+ * + * @param options Additional options, can be null + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity create(CollectionCreateOptions options); + + /** + * Deletes the collection from the database. + * + * @see API + * Documentation + */ + void drop(); + + /** + * Deletes the collection from the database. + * + * @param isSystem Whether or not the collection to drop is a system collection. This parameter must be set to + * true in + * order to drop a system collection. + * @see API + * Documentation + * @since ArangoDB 3.1.0 + */ + void drop(boolean isSystem); + + /** + * Returns information about the collection + * + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity getInfo(); + + /** + * Reads the properties of the specified collection + * + * @return properties of the collection + * @see API + * Documentation + */ + CollectionPropertiesEntity getProperties(); + + /** + * Changes the properties of the collection + * + * @param options Additional options, can be null + * @return properties of the collection + * @see API + * Documentation + */ + CollectionPropertiesEntity changeProperties(CollectionPropertiesOptions options); + + /** + * Renames the collection + * + * @param newName The new name + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity rename(String newName); + + /** + * Returns the responsible shard for the document. + * Please note that this API is only meaningful and available on a cluster coordinator. 
+ * + * @param value A projection of the document containing at least the shard key (_key or a custom attribute) for + * which the responsible shard should be determined + * @return information about the responsible shard + * @see + * + * API Documentation + * @since ArangoDB 3.5.0 + */ + ShardEntity getResponsibleShard(final Object value); + + /** + * Retrieve the collections revision + * + * @return information about the collection, including the collections revision + * @see + * API + * Documentation + */ + CollectionRevisionEntity getRevision(); + + /** + * Grants or revoke access to the collection for user user. You need permission to the _system database in order to + * execute this call. + * + * @param user The name of the user + * @param permissions The permissions the user grant + * @see API + * Documentation + */ + void grantAccess(String user, Permissions permissions); + + /** + * Revokes access to the collection for user user. You need permission to the _system database in order to execute + * this call. + * + * @param user The name of the user + * @see API + * Documentation + */ + void revokeAccess(String user); + + /** + * Clear the collection access level, revert back to the default access level. 
+ * + * @param user The name of the user + * @see API + * Documentation + * @since ArangoDB 3.2.0 + */ + void resetAccess(String user); + + /** + * Get the collection access level + * + * @param user The name of the user + * @return permissions of the user + * @see + * + * API Documentation + * @since ArangoDB 3.2.0 + */ + Permissions getPermissions(String user); + +} diff --git a/core/src/main/java/com/arangodb/ArangoCollectionAsync.java b/core/src/main/java/com/arangodb/ArangoCollectionAsync.java new file mode 100644 index 000000000..8de03b874 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoCollectionAsync.java @@ -0,0 +1,435 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import com.arangodb.util.RawData; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoCollection} + */ +@ThreadSafe +public interface ArangoCollectionAsync extends ArangoSerdeAccessor { + + /** + * @return database async API + */ + ArangoDatabaseAsync db(); + + /** + * @return collection name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocument(Object)} + */ + CompletableFuture> insertDocument(Object value); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocument(Object, DocumentCreateOptions)} + */ + CompletableFuture> insertDocument(T value, DocumentCreateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocument(Object, DocumentCreateOptions, Class)} + */ + CompletableFuture> insertDocument(T value, DocumentCreateOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocuments(RawData)} + */ + CompletableFuture>> insertDocuments(RawData values); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocuments(RawData, DocumentCreateOptions)} + */ + CompletableFuture>> insertDocuments( + RawData values, DocumentCreateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocuments(Iterable)} + */ + CompletableFuture>> insertDocuments(Iterable values); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocuments(Iterable, DocumentCreateOptions)} + */ + CompletableFuture>> insertDocuments( + Iterable values, DocumentCreateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#insertDocuments(Iterable, DocumentCreateOptions, Class)} + */ + CompletableFuture>> insertDocuments( + 
Iterable values, DocumentCreateOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#importDocuments(Iterable)} + */ + CompletableFuture importDocuments(Iterable values); + + /** + * Asynchronous version of {@link ArangoCollection#importDocuments(Iterable, DocumentImportOptions)} + */ + CompletableFuture importDocuments(Iterable values, DocumentImportOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#importDocuments(RawData)} + */ + CompletableFuture importDocuments(RawData values); + + /** + * Asynchronous version of {@link ArangoCollection#importDocuments(RawData, DocumentImportOptions)} + */ + CompletableFuture importDocuments(RawData values, DocumentImportOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#getDocument(String, Class)} + */ + CompletableFuture getDocument(String key, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#getDocument(String, Class, DocumentReadOptions)} + */ + CompletableFuture getDocument(String key, Class type, DocumentReadOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#getDocuments(Iterable, Class)} + */ + CompletableFuture> getDocuments(Iterable keys, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#getDocuments(Iterable, Class, DocumentReadOptions)} + */ + CompletableFuture> getDocuments(Iterable keys, Class type, DocumentReadOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocument(String, Object)} + */ + CompletableFuture> replaceDocument(String key, Object value); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocument(String, Object, DocumentReplaceOptions)} + */ + CompletableFuture> replaceDocument(String key, T value, DocumentReplaceOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocument(String, Object, DocumentReplaceOptions, Class)} + */ + CompletableFuture> 
replaceDocument(String key, T value, DocumentReplaceOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocuments(RawData)} + */ + CompletableFuture>> replaceDocuments(RawData values); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocuments(RawData, DocumentReplaceOptions)} + */ + CompletableFuture>> replaceDocuments( + RawData values, DocumentReplaceOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocuments(Iterable)} )} + */ + CompletableFuture>> replaceDocuments(Iterable values); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocuments(Iterable, DocumentReplaceOptions)} + */ + CompletableFuture>> replaceDocuments( + Iterable values, DocumentReplaceOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#replaceDocuments(Iterable, DocumentReplaceOptions, Class)} + */ + CompletableFuture>> replaceDocuments( + Iterable values, DocumentReplaceOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocument(String, Object)} + */ + CompletableFuture> updateDocument(String key, Object value); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocument(String, Object, DocumentUpdateOptions)} + */ + CompletableFuture> updateDocument(String key, T value, DocumentUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocument(String, Object, DocumentUpdateOptions, Class)} + */ + CompletableFuture> updateDocument(String key, Object value, DocumentUpdateOptions options, + Class returnType); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocuments(RawData)} + */ + CompletableFuture>> updateDocuments(RawData values); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocuments(RawData, DocumentUpdateOptions)} + */ + CompletableFuture>> updateDocuments( + RawData values, DocumentUpdateOptions options); + + /** + * 
Asynchronous version of {@link ArangoCollection#updateDocuments(Iterable)} + */ + CompletableFuture>> updateDocuments(Iterable values); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocuments(Iterable, DocumentUpdateOptions)} + */ + CompletableFuture>> updateDocuments( + Iterable values, DocumentUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#updateDocuments(Iterable, DocumentUpdateOptions, Class)} + */ + CompletableFuture>> updateDocuments( + Iterable values, DocumentUpdateOptions options, Class returnType); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocument(String)} + */ + CompletableFuture> deleteDocument(String key); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocument(String, DocumentDeleteOptions)} + */ + CompletableFuture> deleteDocument(String key, DocumentDeleteOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocument(String, DocumentDeleteOptions, Class)} + */ + CompletableFuture> deleteDocument(String key, DocumentDeleteOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocuments(RawData)} + */ + CompletableFuture>> deleteDocuments(RawData values); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocuments(RawData, DocumentDeleteOptions)} + */ + CompletableFuture>> deleteDocuments( + RawData values, DocumentDeleteOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocuments(Iterable)} + */ + CompletableFuture>> deleteDocuments(Iterable values); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocuments(Iterable, DocumentDeleteOptions)} + */ + CompletableFuture>> deleteDocuments( + Iterable values, DocumentDeleteOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#deleteDocuments(Iterable, DocumentDeleteOptions, Class)} + */ + CompletableFuture>> deleteDocuments( + Iterable values, 
DocumentDeleteOptions options, Class type); + + /** + * Asynchronous version of {@link ArangoCollection#documentExists(String)} + */ + CompletableFuture documentExists(String key); + + /** + * Asynchronous version of {@link ArangoCollection#documentExists(String, DocumentExistsOptions)} + */ + CompletableFuture documentExists(String key, DocumentExistsOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#getIndex(String)} + */ + CompletableFuture getIndex(String id); + + /** + * Asynchronous version of {@link ArangoCollection#getInvertedIndex(String)} + */ + CompletableFuture getInvertedIndex(String id); + + /** + * Asynchronous version of {@link ArangoCollection#deleteIndex(String)} + */ + CompletableFuture deleteIndex(String id); + + /** + * Asynchronous version of {@link ArangoCollection#ensurePersistentIndex(Iterable, PersistentIndexOptions)} + */ + CompletableFuture ensurePersistentIndex(Iterable fields, PersistentIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureGeoIndex(Iterable, GeoIndexOptions)} + */ + CompletableFuture ensureGeoIndex(Iterable fields, GeoIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureFulltextIndex(Iterable, FulltextIndexOptions)} + */ + @Deprecated + CompletableFuture ensureFulltextIndex(Iterable fields, FulltextIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureTtlIndex(Iterable, TtlIndexOptions)} + */ + CompletableFuture ensureTtlIndex(Iterable fields, TtlIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureZKDIndex(Iterable, ZKDIndexOptions)} + * + * @deprecated since ArangoDB 3.12, use {@link #ensureMDIndex(Iterable, MDIndexOptions)} or + * {@link #ensureMDPrefixedIndex(Iterable, MDPrefixedIndexOptions)} instead. 
+ */ + @Deprecated + CompletableFuture ensureZKDIndex(Iterable fields, ZKDIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureMDIndex(Iterable, MDIndexOptions)} + */ + CompletableFuture ensureMDIndex(Iterable fields, MDIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureMDPrefixedIndex(Iterable, MDPrefixedIndexOptions)} + */ + CompletableFuture ensureMDPrefixedIndex(Iterable fields, MDPrefixedIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#ensureInvertedIndex(InvertedIndexOptions)} + */ + CompletableFuture ensureInvertedIndex(InvertedIndexOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#getIndexes()} + */ + CompletableFuture> getIndexes(); + + /** + * Asynchronous version of {@link ArangoCollection#getInvertedIndexes()} + */ + CompletableFuture> getInvertedIndexes(); + + /** + * Asynchronous version of {@link ArangoCollection#exists()} + */ + CompletableFuture exists(); + + /** + * Asynchronous version of {@link ArangoCollection#truncate()} + */ + CompletableFuture truncate(); + + /** + * Asynchronous version of {@link ArangoCollection#truncate(CollectionTruncateOptions)} + */ + CompletableFuture truncate(CollectionTruncateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#count()} + */ + CompletableFuture count(); + + /** + * Asynchronous version of {@link ArangoCollection#count(CollectionCountOptions)} + */ + CompletableFuture count(CollectionCountOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#create()} + */ + CompletableFuture create(); + + /** + * Asynchronous version of {@link ArangoCollection#create(CollectionCreateOptions)} + */ + CompletableFuture create(CollectionCreateOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#drop()} + */ + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoCollection#drop(boolean)} + */ 
+ CompletableFuture drop(boolean isSystem); + + /** + * Asynchronous version of {@link ArangoCollection#getInfo()} + */ + CompletableFuture getInfo(); + + /** + * Asynchronous version of {@link ArangoCollection#getProperties()} + */ + CompletableFuture getProperties(); + + /** + * Asynchronous version of {@link ArangoCollection#changeProperties(CollectionPropertiesOptions)} + */ + CompletableFuture changeProperties(CollectionPropertiesOptions options); + + /** + * Asynchronous version of {@link ArangoCollection#rename(String)} + */ + CompletableFuture rename(String newName); + + /** + * Asynchronous version of {@link ArangoCollection#getResponsibleShard(Object)} + */ + CompletableFuture getResponsibleShard(final Object value); + + /** + * Asynchronous version of {@link ArangoCollection#getRevision()} + */ + CompletableFuture getRevision(); + + /** + * Asynchronous version of {@link ArangoCollection#grantAccess(String, Permissions)} + */ + CompletableFuture grantAccess(String user, Permissions permissions); + + /** + * Asynchronous version of {@link ArangoCollection#revokeAccess(String)} + */ + CompletableFuture revokeAccess(String user); + + /** + * Asynchronous version of {@link ArangoCollection#resetAccess(String)} + */ + CompletableFuture resetAccess(String user); + + /** + * Asynchronous version of {@link ArangoCollection#getPermissions(String)} + */ + CompletableFuture getPermissions(String user); + +} diff --git a/core/src/main/java/com/arangodb/ArangoCursor.java b/core/src/main/java/com/arangodb/ArangoCursor.java new file mode 100644 index 000000000..2bea76109 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoCursor.java @@ -0,0 +1,117 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.CursorStats; +import com.arangodb.entity.CursorWarning; +import com.arangodb.model.AqlQueryOptions; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.NoSuchElementException; + +/** + * @author Mark Vollmary + */ +public interface ArangoCursor extends ArangoIterable, ArangoIterator, Closeable { + + /** + * @return id of temporary cursor created on the server + */ + String getId(); + + /** + * @return the type of the result elements + */ + Class getType(); + + /** + * @return the total number of result documents available (only available if the query was executed with the count + * attribute set) + */ + Integer getCount(); + + /** + * @return extra information about the query result. 
For data-modification queries, the stats will contain the + * number of modified documents and the number of documents that could not be modified due to an error (if + * ignoreErrors query option is specified) + */ + CursorStats getStats(); + + /** + * @return warnings which the query could have been produced + */ + Collection getWarnings(); + + /** + * @return indicating whether the query result was served from the query cache or not + */ + boolean isCached(); + + /** + * @return the remaining results as a {@code List} + */ + default List asListRemaining() { + final List remaining = new ArrayList<>(); + while (hasNext()) { + remaining.add(next()); + } + try { + close(); + } catch (final Exception e) { + LoggerFactory.getLogger(ArangoCursor.class).warn("Could not close cursor: ", e); + } + return remaining; + } + + /** + * @return true if the result is a potential dirty read + * @since ArangoDB 3.10 + */ + boolean isPotentialDirtyRead(); + + /** + * @return The ID of the batch after the current one. The first batch has an ID of 1 and the value is incremented by + * 1 with every batch. Only set if the allowRetry query option is enabled. + * @since ArangoDB 3.11 + */ + String getNextBatchId(); + + /** + * Returns the next element in the iteration. + *

+ * If the cursor allows retries (see {@link AqlQueryOptions#allowRetry(Boolean)}), then it is safe to retry invoking + * this method in case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} with + * cause {@link java.io.IOException}). + *

+ * If the cursor does not allow retries (default), then it is not safe to retry invoking this method in case of I/O + * exceptions, since the request to fetch the next batch is not idempotent (i.e. the cursor may advance multiple + * times on the server). + * + * @return the next element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ + @Override + T next(); +} diff --git a/core/src/main/java/com/arangodb/ArangoCursorAsync.java b/core/src/main/java/com/arangodb/ArangoCursorAsync.java new file mode 100644 index 000000000..e77bd6257 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoCursorAsync.java @@ -0,0 +1,30 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import java.util.concurrent.CompletableFuture; + +public interface ArangoCursorAsync extends BaseArangoCursor { + + CompletableFuture> nextBatch(); + + CompletableFuture close(); +} diff --git a/core/src/main/java/com/arangodb/ArangoDB.java b/core/src/main/java/com/arangodb/ArangoDB.java new file mode 100644 index 000000000..56efa4758 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDB.java @@ -0,0 +1,832 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.HostDescription; +import com.arangodb.config.ProtocolConfig; +import com.arangodb.entity.*; +import com.arangodb.internal.ArangoDBImpl; +import com.arangodb.internal.ArangoExecutorSync; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.*; +import com.arangodb.internal.util.HostUtils; +import com.arangodb.model.*; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.ArangoSerdeProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.concurrent.ThreadSafe; +import javax.net.ssl.SSLContext; +import java.util.*; +import java.util.concurrent.Executor; + +/** + * Central access point for applications to communicate with an ArangoDB server. + * + *

+ * Will be instantiated through {@link ArangoDB.Builder} + *

+ * + *
+ * ArangoDB arango = new ArangoDB.Builder().build();
+ * ArangoDB arango = new ArangoDB.Builder().host("127.0.0.1", 8529).build();
+ * 
+ * + * @author Mark Vollmary + * @author Michele Rastelli + */ +@ThreadSafe +public interface ArangoDB extends ArangoSerdeAccessor { + + /** + * @return the asynchronous version of this class + */ + ArangoDBAsync async(); + + /** + * Releases all connections to the server and clear the connection pool. + */ + void shutdown(); + + /** + * Updates the JWT used for requests authorization. It does not change already existing VST connections, since VST + * connections are authenticated during the initialization phase. + * + * @param jwt token to use + */ + void updateJwt(String jwt); + + /** + * Returns a {@code ArangoDatabase} instance for the {@code _system} database. + * + * @return database handler + */ + ArangoDatabase db(); + + /** + * Returns a {@code ArangoDatabase} instance for the given database name. + * + * @param name Name of the database + * @return database handler + */ + ArangoDatabase db(String name); + + /** + * @return entry point for accessing client metrics + */ + ArangoMetrics metrics(); + + /** + * Creates a new database with the given name. + * + * @param name Name of the database to create + * @return true if the database was created successfully. + * @see API + * Documentation + */ + Boolean createDatabase(String name); + + /** + * Creates a new database with the given name. + * + * @param options Creation options + * @return true if the database was created successfully. 
+ * @see API + * Documentation + * @since ArangoDB 3.6.0 + */ + Boolean createDatabase(DBCreateOptions options); + + /** + * Retrieves a list of all existing databases + * + * @return a list of all existing databases + * @see API + * Documentation + */ + Collection getDatabases(); + + /** + * Retrieves a list of all databases the current user can access + * + * @return a list of all databases the current user can access + * @see API + * Documentation + */ + Collection getAccessibleDatabases(); + + /** + * List available database to the specified user + * + * @param user The name of the user for which you want to query the databases + * @return list of database names which are available for the specified user + * @see API + * Documentation + */ + Collection getAccessibleDatabasesFor(String user); + + /** + * Returns the server name and version number. + * + * @return the server version, number + * @see API + * Documentation + */ + ArangoDBVersion getVersion(); + + /** + * Returns the server storage engine. + * + * @return the storage engine name + * @see API + * Documentation + */ + ArangoDBEngine getEngine(); + + /** + * Returns the server role. + * + * @return the server role + * @see API + * Documentation + */ + ServerRole getRole(); + + /** + * Returns the id of a server in a cluster. + * + * @return the server id + * @see API + * Documentation + */ + String getServerId(); + + /** + * Create a new user. This user will not have access to any database. You need permission to the _system database in + * order to execute this call. + * + * @param user The name of the user + * @param passwd The user password + * @return information about the user + * @see API Documentation + */ + UserEntity createUser(String user, String passwd); + + /** + * Create a new user. This user will not have access to any database. You need permission to the _system database in + * order to execute this call. 
+ * + * @param user The name of the user + * @param passwd The user password + * @param options Additional options, can be null + * @return information about the user + * @see API Documentation + */ + UserEntity createUser(String user, String passwd, UserCreateOptions options); + + /** + * Removes an existing user, identified by user. You need access to the _system database. + * + * @param user The name of the user + * @see API Documentation + */ + void deleteUser(String user); + + /** + * Fetches data about the specified user. You can fetch information about yourself or you need permission to the + * _system database in order to execute this call. + * + * @param user The name of the user + * @return information about the user + * @see API Documentation + */ + UserEntity getUser(String user); + + /** + * Fetches data about all users. You can only execute this call if you have access to the _system database. + * + * @return informations about all users + * @see API + * Documentation + */ + Collection getUsers(); + + /** + * Partially updates the data of an existing user. The name of an existing user must be specified in user. You can + * only change the password of your self. You need access to the _system database to change the active flag. + * + * @param user The name of the user + * @param options Properties of the user to be changed + * @return information about the user + * @see API Documentation + */ + UserEntity updateUser(String user, UserUpdateOptions options); + + /** + * Replaces the data of an existing user. The name of an existing user must be specified in user. You can only + * change the password of your self. You need access to the _system database to change the active flag. 
+ * + * @param user The name of the user + * @param options Additional properties of the user, can be null + * @return information about the user + * @see API + * Documentation + */ + UserEntity replaceUser(String user, UserUpdateOptions options); + + /** + * Sets the default access level for databases for the user {@code user}. You need permission to the _system + * database in order to execute this call. + * + * @param user The name of the user + * @param permissions The permissions the user grant + * @see API + * Documentation + * @since ArangoDB 3.2.0 + */ + void grantDefaultDatabaseAccess(String user, Permissions permissions); + + /** + * Sets the default access level for collections for the user {@code user}. You need permission to the _system + * database in order to execute this call. + * + * @param user The name of the user + * @param permissions The permissions the user grant + * @see API + * Documentation + * @since ArangoDB 3.2.0 + */ + void grantDefaultCollectionAccess(String user, Permissions permissions); + + /** + * Execute custom requests. Requests can be programmatically built by setting low level detail such as method, path, + * query parameters, headers and body payload. + * This method can be used to call FOXX services, API endpoints not (yet) implemented in this driver or trigger + * async jobs, see + * Fire and Forget + * and + * Async Execution and later Result Retrieval + * + * @param request request + * @param type Deserialization target type for the response body (POJO or {@link com.arangodb.util.RawData}) + * @return response + */ + Response execute(Request request, Class type); + + /** + * Returns the server logs + * + * @param options Additional options, can be null + * @return the log messages + * @see API + * Documentation + * @since ArangoDB 3.8 + */ + LogEntriesEntity getLogEntries(LogOptions options); + + /** + * Returns the server's current loglevel settings. 
+ * + * @return the server's current loglevel settings + * @see API + * Documentation + * @since ArangoDB 3.1.0 + */ + LogLevelEntity getLogLevel(); + + /** + * Returns the server's current loglevel settings. + * + * @return the server's current loglevel settings + * @see API + * Documentation + * @since ArangoDB 3.10 + */ + LogLevelEntity getLogLevel(LogLevelOptions options); + + /** + * Modifies and returns the server's current loglevel settings. + * + * @param entity loglevel settings + * @return the server's current loglevel settings + * @see API + * Documentation + * @since ArangoDB 3.1.0 + */ + LogLevelEntity setLogLevel(LogLevelEntity entity); + + /** + * Modifies and returns the server's current loglevel settings. + * + * @param entity loglevel settings + * @return the server's current loglevel settings + * @see API + * Documentation + * @since ArangoDB 3.10 + */ + LogLevelEntity setLogLevel(LogLevelEntity entity, LogLevelOptions options); + + /** + * Reset the server log levels + * Revert the server's log level settings to the values they had at startup, as determined by the startup options specified on the command-line, a configuration file, and the factory defaults. + * + * @since ArangoDB 3.12 + */ + LogLevelEntity resetLogLevels(LogLevelOptions options); + + /** + * @return the list of available rules and their respective flags + * @see API + * Documentation + * @since ArangoDB 3.10 + */ + Collection getQueryOptimizerRules(); + + /** + * Builder class to build an instance of {@link ArangoDB}. + * + * @author Mark Vollmary + */ + class Builder { + private static final Logger LOG = LoggerFactory.getLogger(Builder.class); + private final ArangoConfig config = new ArangoConfig(); + + /** + * Returns an instance of {@link ArangoDB}. 
+ * + * @return {@link ArangoDB} + */ + public ArangoDB build() { + if (config.getHosts().isEmpty()) { + throw new ArangoDBException("No host has been set!"); + } + + ProtocolProvider protocolProvider = protocolProvider(config.getProtocol()); + config.setProtocolModule(protocolProvider.protocolModule()); + + ConnectionFactory connectionFactory = protocolProvider.createConnectionFactory(config.getProtocolConfig()); + Collection hostList = createHostList(connectionFactory); + HostResolver hostResolver = createHostResolver(hostList, connectionFactory); + HostHandler hostHandler = createHostHandler(hostResolver); + hostHandler.setJwt(config.getJwt()); + + CommunicationProtocol protocol = protocolProvider.createProtocol(config, hostHandler); + ArangoExecutorSync executor = new ArangoExecutorSync(protocol, config); + hostResolver.init(executor, config.getInternalSerde()); + + return new ArangoDBImpl( + config, + protocol, + hostHandler + ); + } + + public Builder loadProperties(final ArangoConfigProperties properties) { + config.loadProperties(properties); + return this; + } + + public Builder protocol(final Protocol protocol) { + config.setProtocol(protocol); + return this; + } + + /** + * Adds a host to connect to. Multiple hosts can be added to provide fallbacks. + * + * @param host address of the host + * @param port port of the host + * @return {@link ArangoDB.Builder} + */ + public Builder host(final String host, final int port) { + config.addHost(new HostDescription(host, port)); + return this; + } + + /** + * Sets the connection and request timeout in milliseconds. + * + * @param timeout timeout in milliseconds + * @return {@link ArangoDB.Builder} + */ + public Builder timeout(final Integer timeout) { + config.setTimeout(timeout); + return this; + } + + /** + * Sets the username to use for authentication. 
+ * + * @param user the user in the database (default: {@code root}) + * @return {@link ArangoDB.Builder} + */ + public Builder user(final String user) { + config.setUser(user); + return this; + } + + /** + * Sets the password for the user for authentication. + * + * @param password the password of the user in the database (default: {@code null}) + * @return {@link ArangoDB.Builder} + */ + public Builder password(final String password) { + config.setPassword(password); + return this; + } + + /** + * Sets the JWT for the user authentication. + * + * @param jwt token to use (default: {@code null}) + * @return {@link ArangoDB.Builder} + */ + public Builder jwt(final String jwt) { + config.setJwt(jwt); + return this; + } + + /** + * If set to {@code true} SSL will be used when connecting to an ArangoDB server. + * + * @param useSsl whether or not use SSL (default: {@code false}) + * @return {@link ArangoDB.Builder} + */ + public Builder useSsl(final Boolean useSsl) { + config.setUseSsl(useSsl); + return this; + } + + /** + * Sets the SSL certificate value as Base64 encoded String + * + * @param sslCertValue the SSL certificate value as Base64 encoded String + * @return {@link ArangoDB.Builder} + */ + public Builder sslCertValue(final String sslCertValue) { + config.setSslCertValue(sslCertValue); + return this; + } + + /** + * Sets the SSL Trust manager algorithm + * + * @param sslAlgorithm the name of the SSL Trust manager algorithm + * @return {@link ArangoDB.Builder} + */ + public Builder sslAlgorithm(final String sslAlgorithm) { + config.setSslAlgorithm(sslAlgorithm); + return this; + } + + /** + * Sets the SSLContext protocol, default: {@code TLS} + * + * @param sslProtocol the name of the SSLContext protocol + * @return {@link ArangoDB.Builder} + */ + public Builder sslProtocol(final String sslProtocol) { + config.setSslProtocol(sslProtocol); + return this; + } + + /** + * Sets the SSL context to be used when {@code true} is passed through {@link 
#useSsl(Boolean)}. + * + * @param sslContext SSL context to be used + * @return {@link ArangoDB.Builder} + */ + public Builder sslContext(final SSLContext sslContext) { + config.setSslContext(sslContext); + return this; + } + + /** + * Set whether hostname verification is enabled + * + * @param verifyHost {@code true} if enabled + * @return {@link ArangoDB.Builder} + */ + public Builder verifyHost(final Boolean verifyHost) { + config.setVerifyHost(verifyHost); + return this; + } + + /** + * Sets the chunk size when {@link Protocol#VST} is used. + * + * @param chunkSize size of a chunk in bytes + * @return {@link ArangoDB.Builder} + */ + public Builder chunkSize(final Integer chunkSize) { + config.setChunkSize(chunkSize); + return this; + } + + /** + * Set whether to use requests pipelining in HTTP/1.1 ({@link Protocol#HTTP_JSON} or {@link Protocol#HTTP_VPACK}). + * + * @param pipelining {@code true} if enabled + * @return {@link ArangoDB.Builder} + */ + public Builder pipelining(final Boolean pipelining) { + config.setPipelining(pipelining); + return this; + } + + /** + * Sets the maximum number of connections the built in connection pool will open per host. + * + *

+ * Defaults: + *

+ * + *
+         * {@link Protocol#VST} == 1
+         * {@link Protocol#HTTP_JSON} == 20
+         * {@link Protocol#HTTP_VPACK} == 20
+         * {@link Protocol#HTTP2_JSON} == 1
+         * {@link Protocol#HTTP2_VPACK} == 1
+         * 
+ * + * @param maxConnections max number of connections + * @return {@link ArangoDB.Builder} + */ + public Builder maxConnections(final Integer maxConnections) { + config.setMaxConnections(maxConnections); + return this; + } + + /** + * Set the time to live of an inactive connection. After this time of inactivity the connection will be + * closed automatically. + * + * @param connectionTtl the time to live of a connection in milliseconds + * @return {@link ArangoDB.Builder} + */ + public Builder connectionTtl(final Long connectionTtl) { + config.setConnectionTtl(connectionTtl); + return this; + } + + /** + * Set the keep-alive interval for VST connections. If set, every VST connection will perform a no-op request + * every {@code keepAliveInterval} seconds, to avoid to be closed due to inactivity by the server (or by the + * external environment, eg. firewall, intermediate routers, operating system). + * + * @param keepAliveInterval interval in seconds + * @return {@link ArangoDB.Builder} + */ + public Builder keepAliveInterval(final Integer keepAliveInterval) { + config.setKeepAliveInterval(keepAliveInterval); + return this; + } + + /** + * Whether the driver should acquire a list of available coordinators in an ArangoDB cluster or a single + * server with active failover. In case of Active-Failover deployment set to {@code true} to enable automatic + * master discovery. + * + *

+ * The host list will be used for failover and load balancing. + *

+ * + * @param acquireHostList whether automatically acquire a list of available hosts (default: false) + * @return {@link ArangoDB.Builder} + */ + public Builder acquireHostList(final Boolean acquireHostList) { + config.setAcquireHostList(acquireHostList); + return this; + } + + /** + * Setting the Interval for acquireHostList + * + * @param acquireHostListInterval Interval in milliseconds + * @return {@link ArangoDB.Builder} + */ + public Builder acquireHostListInterval(final Integer acquireHostListInterval) { + config.setAcquireHostListInterval(acquireHostListInterval); + return this; + } + + /** + * Sets the load balancing strategy to be used in an ArangoDB cluster setup. In case of Active-Failover + * deployment set to {@link LoadBalancingStrategy#NONE} or not set at all, since that would be the default. + * + * @param loadBalancingStrategy the load balancing strategy to be used (default: + * {@link LoadBalancingStrategy#NONE} + * @return {@link ArangoDB.Builder} + */ + public Builder loadBalancingStrategy(final LoadBalancingStrategy loadBalancingStrategy) { + config.setLoadBalancingStrategy(loadBalancingStrategy); + return this; + } + + /** + * Setting the amount of samples kept for queue time metrics + * + * @param responseQueueTimeSamples amount of samples to keep + * @return {@link ArangoDB.Builder} + */ + public Builder responseQueueTimeSamples(final Integer responseQueueTimeSamples) { + config.setResponseQueueTimeSamples(responseQueueTimeSamples); + return this; + } + + /** + * Sets the serde for the user data. + * This is used to serialize and deserialize all the data payload such as: + * - documents, vertexes, edges + * - AQL bind vars + * - body payload of requests and responses in {@link ArangoDB#execute(Request, Class)} + *

+ * However, note that the following types will always be serialized and deserialized using the internal serde: + * - {@link com.fasterxml.jackson.databind.JsonNode} + * - {@link com.arangodb.util.RawJson} + * - {@link com.arangodb.util.RawBytes} + * - {@link com.arangodb.entity.BaseDocument} + * - {@link com.arangodb.entity.BaseEdgeDocument} + * + * @param serde custom serde for the user data + * @return {@link ArangoDB.Builder} + */ + public Builder serde(final ArangoSerde serde) { + config.setUserDataSerde(serde); + return this; + } + + /** + * Sets the serde provider to be used to instantiate the user data serde. + * Ignored if {@link Builder#serde(ArangoSerde)} is used. + * + * @param serdeProviderClass class of the serde provider, it must have a public no-args constructor + * @return {@link ArangoDB.Builder} + */ + public Builder serdeProviderClass(final Class serdeProviderClass) { + config.setUserDataSerdeProvider(serdeProviderClass); + return this; + } + + /** + * Sets the downstream async executor that will be used to consume the responses of the async API, that are returned + * as {@link java.util.concurrent.CompletableFuture} + * + * @param executor async downstream executor + * @return {@link ArangoDB.Builder} + * @deprecated for removal. To consume the responses in a custom executor use async CompletableFuture methods. + */ + @Deprecated + public Builder asyncExecutor(final Executor executor) { + config.setAsyncExecutor(executor); + return this; + } + + /** + * Sets the {@code content-encoding} and {@code accept-encoding} to use for HTTP requests and the related + * algorithm to encode and decode the transferred data. 
(default: {@link Compression#NONE}) + * + * @param compression format + * @return {@link ArangoDB.Builder} + * @since ArangoDB 3.12 + */ + public Builder compression(final Compression compression) { + config.setCompression(compression); + return this; + } + + /** + * Sets the minimum HTTP request body size (in bytes) to trigger compression. + * (default: {@code 1024}) + * + * @param threshold body size (in bytes) + * @return {@link ArangoDB.Builder} + * @since ArangoDB 3.12 + */ + public Builder compressionThreshold(Integer threshold) { + config.setCompressionThreshold(threshold); + return this; + } + + /** + * Sets the compression level. (default: {@code 6}) + * + * @param level compression level between 0 and 9 + * @return {@link ArangoDB.Builder} + * @since ArangoDB 3.12 + */ + public Builder compressionLevel(Integer level) { + config.setCompressionLevel(level); + return this; + } + + /** + * Configuration specific for {@link com.arangodb.internal.net.ProtocolProvider}. + * + * @return {@link ArangoDB.Builder} + */ + public Builder protocolConfig(ProtocolConfig protocolConfig) { + config.setProtocolConfig(protocolConfig); + return this; + } + + @UnstableApi + protected ProtocolProvider protocolProvider(Protocol protocol) { + ServiceLoader loader = ServiceLoader.load(ProtocolProvider.class); + Iterator iterator = loader.iterator(); + while (iterator.hasNext()) { + ProtocolProvider p; + try { + p = iterator.next(); + } catch (ServiceConfigurationError e) { + LOG.warn("ServiceLoader failed to load ProtocolProvider", e); + continue; + } + if (p.supportsProtocol(protocol)) { + return p; + } + LOG.debug("Required protocol ({}) not supported by ProtocolProvider: {}", protocol, p.getClass().getName()); + } + throw new ArangoDBException("No ProtocolProvider found for protocol: " + protocol); + } + + @UnstableApi + protected HostHandler createHostHandler(@UnstableApi final HostResolver hostResolver) { + + final HostHandler hostHandler; + + LoadBalancingStrategy 
loadBalancingStrategy = config.getLoadBalancingStrategy(); + if (loadBalancingStrategy != null) { + switch (loadBalancingStrategy) { + case ONE_RANDOM: + hostHandler = new RandomHostHandler(hostResolver, new FallbackHostHandler(hostResolver)); + break; + case ROUND_ROBIN: + hostHandler = new RoundRobinHostHandler(hostResolver); + break; + case NONE: + default: + hostHandler = new FallbackHostHandler(hostResolver); + break; + } + } else { + hostHandler = new FallbackHostHandler(hostResolver); + } + + LOG.debug("HostHandler is {}", hostHandler.getClass().getSimpleName()); + + return new DirtyReadHostHandler(hostHandler, new RoundRobinHostHandler(hostResolver)); + } + + @UnstableApi + protected HostResolver createHostResolver(@UnstableApi final Collection hosts, @UnstableApi final ConnectionFactory connectionFactory) { + Boolean acquireHostList = config.getAcquireHostList(); + if (acquireHostList != null && acquireHostList) { + LOG.debug("acquireHostList -> Use ExtendedHostResolver"); + return new ExtendedHostResolver(new ArrayList<>(hosts), config, connectionFactory, + config.getAcquireHostListInterval()); + } else { + LOG.debug("Use SimpleHostResolver"); + return new SimpleHostResolver(new ArrayList<>(hosts)); + } + } + + @UnstableApi + protected Collection createHostList(@UnstableApi final ConnectionFactory connectionFactory) { + final Collection hostList = new ArrayList<>(); + for (final HostDescription host : config.getHosts()) { + hostList.add(HostUtils.createHost(host, config, connectionFactory)); + } + return hostList; + } + } + +} diff --git a/core/src/main/java/com/arangodb/ArangoDBAsync.java b/core/src/main/java/com/arangodb/ArangoDBAsync.java new file mode 100644 index 000000000..8ea6985e3 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDBAsync.java @@ -0,0 +1,199 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoDB} + */ +@ThreadSafe +public interface ArangoDBAsync extends ArangoSerdeAccessor { + + /** + * Releases all connections to the server and clear the connection pool. + */ + void shutdown(); + + /** + * Updates the JWT used for requests authorization. It does not change already existing VST connections, since VST + * connections are authenticated during the initialization phase. + * + * @param jwt token to use + */ + void updateJwt(String jwt); + + /** + * Returns a {@code ArangoDatabase} instance for the {@code _system} database. + * + * @return database handler + */ + ArangoDatabaseAsync db(); + + /** + * Returns a {@code ArangoDatabase} instance for the given database name. 
+ * + * @param name Name of the database + * @return database handler + */ + ArangoDatabaseAsync db(String name); + + /** + * @return entry point for accessing client metrics + */ + ArangoMetrics metrics(); + + /** + * Asynchronous version of {@link ArangoDB#createDatabase(String)} + */ + CompletableFuture createDatabase(String name); + + /** + * Asynchronous version of {@link ArangoDB#createDatabase(DBCreateOptions)} + */ + CompletableFuture createDatabase(DBCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDB#getDatabases()} + */ + CompletableFuture> getDatabases(); + + /** + * Asynchronous version of {@link ArangoDB#getAccessibleDatabases()} + */ + CompletableFuture> getAccessibleDatabases(); + + /** + * Asynchronous version of {@link ArangoDB#getAccessibleDatabasesFor(String)} + */ + CompletableFuture> getAccessibleDatabasesFor(String user); + + /** + * Asynchronous version of {@link ArangoDB#getVersion()} + */ + CompletableFuture getVersion(); + + /** + * Asynchronous version of {@link ArangoDB#getEngine()} + */ + CompletableFuture getEngine(); + + /** + * Asynchronous version of {@link ArangoDB#getRole()} + */ + CompletableFuture getRole(); + + /** + * Asynchronous version of {@link ArangoDB#getServerId()} + */ + CompletableFuture getServerId(); + + /** + * Asynchronous version of {@link ArangoDB#createUser(String, String)} + */ + CompletableFuture createUser(String user, String passwd); + + /** + * Asynchronous version of {@link ArangoDB#createUser(String, String, UserCreateOptions)} + */ + CompletableFuture createUser(String user, String passwd, UserCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDB#deleteUser(String)} + */ + CompletableFuture deleteUser(String user); + + /** + * Asynchronous version of {@link ArangoDB#getUser(String)} + */ + CompletableFuture getUser(String user); + + /** + * Asynchronous version of {@link ArangoDB#getUsers()} + */ + CompletableFuture> getUsers(); + + /** + * Asynchronous 
version of {@link ArangoDB#updateUser(String, UserUpdateOptions)} + */ + CompletableFuture updateUser(String user, UserUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoDB#replaceUser(String, UserUpdateOptions)} + */ + CompletableFuture replaceUser(String user, UserUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoDB#grantDefaultDatabaseAccess(String, Permissions)} + */ + CompletableFuture grantDefaultDatabaseAccess(String user, Permissions permissions); + + /** + * Asynchronous version of {@link ArangoDB#grantDefaultCollectionAccess(String, Permissions)} + */ + CompletableFuture grantDefaultCollectionAccess(String user, Permissions permissions); + + /** + * Asynchronous version of {@link ArangoDB#execute(Request, Class)} + */ + CompletableFuture> execute(Request request, Class type); + + /** + * Asynchronous version of {@link ArangoDB#getLogEntries(LogOptions)} + */ + CompletableFuture getLogEntries(LogOptions options); + + /** + * Asynchronous version of {@link ArangoDB#getLogLevel()} + */ + CompletableFuture getLogLevel(); + + /** + * Asynchronous version of {@link ArangoDB#getLogLevel(LogLevelOptions)} + */ + CompletableFuture getLogLevel(LogLevelOptions options); + + /** + * Asynchronous version of {@link ArangoDB#setLogLevel(LogLevelEntity)} + */ + CompletableFuture setLogLevel(LogLevelEntity entity); + + /** + * Asynchronous version of {@link ArangoDB#setLogLevel(LogLevelEntity, LogLevelOptions)} + */ + CompletableFuture setLogLevel(LogLevelEntity entity, LogLevelOptions options); + + /** + * Asynchronous version of {@link ArangoDB#resetLogLevels(LogLevelOptions)} + */ + CompletableFuture resetLogLevels(LogLevelOptions options); + + /** + * Asynchronous version of {@link ArangoDB#getQueryOptimizerRules()} + */ + CompletableFuture> getQueryOptimizerRules(); + +} diff --git a/core/src/main/java/com/arangodb/ArangoDBException.java b/core/src/main/java/com/arangodb/ArangoDBException.java new file mode 100644 index 
000000000..60798efcc --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDBException.java @@ -0,0 +1,188 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ErrorEntity; + +import java.util.Objects; +import java.util.concurrent.CompletionException; + +/** + * @author Mark Vollmary + */ +public class ArangoDBException extends RuntimeException { + + private static final long serialVersionUID = 6165638002614173801L; + private final ErrorEntity entity; + private final Integer responseCode; + private final Long requestId; + + public ArangoDBException(final ErrorEntity errorEntity) { + super(String.format("Response: %s, Error: %s - %s", errorEntity.getCode(), errorEntity.getErrorNum(), + errorEntity.getErrorMessage())); + this.entity = errorEntity; + this.responseCode = errorEntity.getCode(); + this.requestId = null; + } + + public ArangoDBException(final String message) { + super(message); + this.entity = null; + this.responseCode = null; + this.requestId = null; + } + + public ArangoDBException(final String message, final Integer responseCode) { + super(message); + this.entity = null; + this.responseCode = responseCode; + this.requestId = null; + } + + /** + * @deprecated use {@link com.arangodb.ArangoDBException#of(java.lang.Throwable)} instead + */ + @Deprecated + 
public ArangoDBException(final Throwable cause) { + super(cause); + this.entity = null; + this.responseCode = null; + this.requestId = null; + } + + /** + * @deprecated use {@link com.arangodb.ArangoDBException#of(String, Throwable)} instead + */ + @Deprecated + public ArangoDBException(final String message, final Throwable cause) { + super(message, cause); + this.entity = null; + this.responseCode = null; + this.requestId = null; + } + + /** + * @deprecated use {@link com.arangodb.ArangoDBException#of(Throwable, Long)} instead + */ + @Deprecated + public ArangoDBException(Throwable cause, long requestId) { + super(cause); + this.entity = null; + this.responseCode = null; + this.requestId = requestId; + } + + private ArangoDBException( + String message, + Throwable cause, + ErrorEntity entity, + Integer responseCode, + Long requestId + ) { + super(message, cause); + this.entity = entity; + this.responseCode = responseCode; + this.requestId = requestId; + } + + public static ArangoDBException of(Throwable t) { + return of(null, t); + } + + public static ArangoDBException of(String message, Throwable t) { + return of(message, t, null); + } + + public static ArangoDBException of(Throwable t, Long requestId) { + return of(null, t, requestId); + } + + private static ArangoDBException of(String message, Throwable t, Long requestId) { + Objects.requireNonNull(t); + if (t instanceof CompletionException) { + return of(message, t.getCause(), requestId); + } + Throwable cause = unwrapCause(t); + String msg = message != null ? message + : t.getMessage() != null ? t.getMessage() + : cause.getMessage(); + ErrorEntity entity = null; + Integer responseCode = null; + Long reqId = requestId; + + if (t instanceof ArangoDBException) { + entity = ((ArangoDBException) t).entity; + responseCode = ((ArangoDBException) t).responseCode; + reqId = reqId != null ? 
reqId : ((ArangoDBException) t).requestId; + } + + return new ArangoDBException( + msg, + cause, + entity, + responseCode, + reqId + ); + } + + private static Throwable unwrapCause(Throwable t) { + if (t instanceof ArangoDBException && t.getCause() != null) { + return unwrapCause(t.getCause()); + } + return t; + } + + /** + * @return ArangoDB error message + */ + public String getErrorMessage() { + return entity != null ? entity.getErrorMessage() : null; + } + + /** + * @return ArangoDB exception + */ + public String getException() { + return entity != null ? entity.getException() : null; + } + + /** + * @return HTTP response code + */ + public Integer getResponseCode() { + Integer entityResponseCode = entity != null ? entity.getCode() : null; + return responseCode != null ? responseCode : entityResponseCode; + } + + /** + * @return ArangoDB error number + */ + public Integer getErrorNum() { + return entity != null ? entity.getErrorNum() : null; + } + + /** + * @return request id + */ + public Long getRequestId() { + return requestId; + } +} diff --git a/core/src/main/java/com/arangodb/ArangoDBMultipleException.java b/core/src/main/java/com/arangodb/ArangoDBMultipleException.java new file mode 100644 index 000000000..4181cb95d --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDBMultipleException.java @@ -0,0 +1,44 @@ +package com.arangodb; + +import java.util.List; +import java.util.Objects; +import java.util.StringJoiner; + +public class ArangoDBMultipleException extends RuntimeException { + + private final List exceptions; + + public ArangoDBMultipleException(List exceptions) { + super(); + this.exceptions = exceptions; + } + + public List getExceptions() { + return exceptions; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ArangoDBMultipleException that = (ArangoDBMultipleException) o; + return Objects.equals(exceptions, that.exceptions); + } + + @Override 
+ public int hashCode() { + return Objects.hash(exceptions); + } + + @Override + public String toString() { + StringJoiner joiner = new StringJoiner("\n\t", "ArangoDBMultipleException{\n\t", "\n}"); + for (Throwable t : exceptions) { + StringJoiner tJoiner = new StringJoiner("\n\t\t", "\n\t\t", ""); + for (StackTraceElement stackTraceElement : t.getStackTrace()) + tJoiner.add("at " + stackTraceElement); + joiner.add(t + tJoiner.toString()); + } + return joiner.toString(); + } +} diff --git a/core/src/main/java/com/arangodb/ArangoDatabase.java b/core/src/main/java/com/arangodb/ArangoDatabase.java new file mode 100644 index 000000000..4af6cee38 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDatabase.java @@ -0,0 +1,766 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer; +import com.arangodb.model.*; +import com.arangodb.model.arangosearch.AnalyzerDeleteOptions; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.Map; + +/** + * Interface for operations on ArangoDB database level. 
+ * + * @author Mark Vollmary + * @author Michele Rastelli + * @see Databases API Documentation + * @see Query API Documentation + */ +@ThreadSafe +public interface ArangoDatabase extends ArangoSerdeAccessor { + + /** + * Return the main entry point for the ArangoDB driver + * + * @return main entry point + */ + ArangoDB arango(); + + /** + * Returns the name of the database + * + * @return database name + */ + String name(); + + /** + * Returns the server name and version number. + * + * @return the server version, number + * @see API + * Documentation + */ + ArangoDBVersion getVersion(); + + /** + * Returns the name of the used storage engine. + * + * @return the storage engine name + * @see API + * Documentation + */ + ArangoDBEngine getEngine(); + + /** + * Checks whether the database exists + * + * @return true if the database exists, otherwise false + * @see API + * Documentation + */ + boolean exists(); + + /** + * Retrieves a list of all databases the current user can access + * + * @return a list of all databases the current user can access + * @see API + * Documentation + */ + Collection getAccessibleDatabases(); + + /** + * Returns a {@code ArangoCollection} instance for the given collection name. + * + * @param name Name of the collection + * @return collection handler + */ + ArangoCollection collection(String name); + + /** + * Creates a collection for the given collection's name, then returns collection information from the server. + * + * @param name The name of the collection + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity createCollection(String name); + + /** + * Creates a collection with the given {@code options} for this collection's name, then returns collection + * information from the server. 
+ * + * @param name The name of the collection + * @param options Additional options, can be null + * @return information about the collection + * @see API + * Documentation + */ + CollectionEntity createCollection(String name, CollectionCreateOptions options); + + /** + * Fetches all collections from the database and returns an list of collection descriptions. + * + * @return list of information about all collections + * @see API + * Documentation + */ + Collection getCollections(); + + /** + * Fetches all collections from the database and returns an list of collection descriptions. + * + * @param options Additional options, can be null + * @return list of information about all collections + * @see API + * Documentation + */ + Collection getCollections(CollectionsReadOptions options); + + /** + * Returns an index + * + * @param id The index-handle + * @return information about the index + * @see API Documentation + */ + IndexEntity getIndex(String id); + + /** + * Deletes an index + * + * @param id The index-handle + * @return the id of the index + * @see API Documentation + */ + String deleteIndex(String id); + + /** + * Creates the database + * + * @return true if the database was created successfully. + * @see API + * Documentation + */ + Boolean create(); + + /** + * Deletes the database from the server. + * + * @return true if the database was dropped successfully + * @see API + * Documentation + */ + Boolean drop(); + + /** + * Grants or revoke access to the database for user {@code user}. You need permission to the _system database in + * order to execute this call. + * + * @param user The name of the user + * @param permissions The permissions the user grant + * @see + * API Documentation + */ + void grantAccess(String user, Permissions permissions); + + /** + * Grants access to the database for user {@code user}. You need permission to the _system database in order to + * execute this call. 
+ * + * @param user The name of the user + * @see + * API Documentation + */ + void grantAccess(String user); + + /** + * Revokes access to the database dbname for user {@code user}. You need permission to the _system database in order + * to execute this call. + * + * @param user The name of the user + * @see + * API Documentation + */ + void revokeAccess(String user); + + /** + * Clear the database access level, revert back to the default access level. + * + * @param user The name of the user + * @see + * API Documentation + * @since ArangoDB 3.2.0 + */ + void resetAccess(String user); + + /** + * Sets the default access level for collections within this database for the user {@code user}. You need permission + * to the _system database in order to execute this call. + * + * @param user The name of the user + * @param permissions The permissions the user grant + * @see + * API Documentation + * @since ArangoDB 3.2.0 + */ + void grantDefaultCollectionAccess(String user, Permissions permissions); + + /** + * Get specific database access level + * + * @param user The name of the user + * @return permissions of the user + * @see API + * Documentation + * @since ArangoDB 3.2.0 + */ + Permissions getPermissions(String user); + + /** + * Performs a database query using the given {@code query} and {@code bindVars}, then returns a new + * {@code ArangoCursor} instance for the result list. + * + * @param query An AQL query string + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param bindVars key/value pairs defining the variables to bind the query to + * @param options Additional options that will be passed to the query API, can be null + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor query(String query, Class type, Map bindVars, AqlQueryOptions options); + + /** + * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the + * result list. 
+ * + * @param query An AQL query string + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options that will be passed to the query API, can be null + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor query(String query, Class type, AqlQueryOptions options); + + /** + * Performs a database query using the given {@code query} and {@code bindVars}, then returns a new + * {@code ArangoCursor} instance for the result list. + * + * @param query An AQL query string + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param bindVars key/value pairs defining the variables to bind the query to + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor query(String query, Class type, Map bindVars); + + /** + * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the + * result list. + * + * @param query An AQL query string + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor query(String query, Class type); + + /** + * Return an cursor from the given cursor-ID if still existing + * + * @param cursorId The ID of the cursor + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor cursor(String cursorId, Class type); + + /** + * Return an cursor from the given cursor-ID if still existing + * + * @param cursorId The ID of the cursor + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param options options + * @return cursor of the results + * @see API + * Documentation + */ + ArangoCursor cursor(String cursorId, Class type, AqlQueryOptions options); + + /** + * Return an cursor from the given cursor-ID if still 
existing + * + * @param cursorId The ID of the cursor + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param nextBatchId The ID of the next cursor batch (set only if cursor allows retries, see + * {@link AqlQueryOptions#allowRetry(Boolean)} + * @return cursor of the results + * @see API Documentation + * @since ArangoDB 3.11 + */ + ArangoCursor cursor(String cursorId, Class type, String nextBatchId); + + /** + * Return an cursor from the given cursor-ID if still existing + * + * @param cursorId The ID of the cursor + * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param nextBatchId The ID of the next cursor batch (set only if cursor allows retries, see + * {@link AqlQueryOptions#allowRetry(Boolean)} + * @param options options + * @return cursor of the results + * @see API Documentation + * @since ArangoDB 3.11 + */ + ArangoCursor cursor(String cursorId, Class type, String nextBatchId, AqlQueryOptions options); + + /** + * Explain an AQL query and return information about it + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind parameters + * @param options Additional options, can be null + * @return information about the query + * @see API + * Documentation + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, AqlQueryExplainOptions)} instead + */ + @Deprecated + AqlExecutionExplainEntity explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * Explain an AQL query and return information about it + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind parameters + * @param options Additional options, can be null + * @return information about the query + * @see API + * Documentation + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ + @Deprecated 
+ AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + + /** + * Explain an AQL query and return information about it + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind parameters + * @param options Additional options, can be null + * @return information about the query + * @see API + * Documentation + */ + AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options); + + /** + * Parse an AQL query and return information about it This method is for query validation only. To actually query + * the database, see {@link ArangoDatabase#query(String, Class, Map, AqlQueryOptions)} + * + * @param query the query which you want parse + * @return imformation about the query + * @see API + * Documentation + */ + AqlParseEntity parseQuery(String query); + + /** + * Clears the AQL query cache + * + * @see API + * Documentation + */ + void clearQueryCache(); + + /** + * Returns the global configuration for the AQL query cache + * + * @return configuration for the AQL query cache + * @see API + * Documentation + */ + QueryCachePropertiesEntity getQueryCacheProperties(); + + /** + * Changes the configuration for the AQL query cache. Note: changing the properties may invalidate all results in + * the cache. 
+ * + * @param properties properties to be set + * @return current set of properties + * @see API + * Documentation + */ + QueryCachePropertiesEntity setQueryCacheProperties(QueryCachePropertiesEntity properties); + + /** + * Returns the configuration for the AQL query tracking + * + * @return configuration for the AQL query tracking + * @see API + * Documentation + */ + QueryTrackingPropertiesEntity getQueryTrackingProperties(); + + /** + * Changes the configuration for the AQL query tracking + * + * @param properties properties to be set + * @return current set of properties + * @see API + * Documentation + */ + QueryTrackingPropertiesEntity setQueryTrackingProperties(QueryTrackingPropertiesEntity properties); + + /** + * Returns a list of currently running AQL queries + * + * @return a list of currently running AQL queries + * @see API + * Documentation + */ + Collection getCurrentlyRunningQueries(); + + /** + * Returns a list of slow running AQL queries + * + * @return a list of slow running AQL queries + * @see API + * Documentation + */ + Collection getSlowQueries(); + + /** + * Clears the list of slow AQL queries + * + * @see API + * Documentation + */ + void clearSlowQueries(); + + /** + * Kills a running query. The query will be terminated at the next cancelation point. + * + * @param id The id of the query + * @see API + * Documentation + */ + void killQuery(String id); + + /** + * Create a new AQL user function + * + * @param name A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"` + * @param code A String evaluating to a JavaScript function + * @param options Additional options, can be null + * @see API + * Documentation + */ + void createAqlFunction(String name, String code, AqlFunctionCreateOptions options); + + /** + * Deletes the AQL user function with the given name from the database. 
+ * + * @param name The name of the user function to delete + * @param options Additional options, can be null + * @return number of deleted functions (since ArangoDB 3.4.0) + * @see API + * Documentation + */ + Integer deleteAqlFunction(String name, AqlFunctionDeleteOptions options); + + /** + * Gets all reqistered AQL user functions + * + * @param options Additional options, can be null + * @return all reqistered AQL user functions + * @see API + * Documentation + */ + Collection getAqlFunctions(AqlFunctionGetOptions options); + + /** + * Returns a {@code ArangoGraph} instance for the given graph name. + * + * @param name Name of the graph + * @return graph handler + */ + ArangoGraph graph(String name); + + /** + * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition + * of its edges. + * + * @param name Name of the graph + * @param edgeDefinitions An array of definitions for the edge + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity createGraph(String name, Iterable edgeDefinitions); + + /** + * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition + * of its edges. + * + * @param name Name of the graph + * @param edgeDefinitions An array of definitions for the edge + * @param options Additional options, can be null + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity createGraph(String name, Iterable edgeDefinitions, GraphCreateOptions options); + + /** + * Lists all graphs known to the graph module + * + * @return graphs stored in this database + * @see API + * Documentation + */ + Collection getGraphs(); + + /** + * Performs a server-side transaction and returns its return value. + * + * @param action A String evaluating to a JavaScript function to be executed on the server. 
+ * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return the result of the transaction if it succeeded + * @see API + * Documentation + */ + T transaction(String action, Class type, TransactionOptions options); + + /** + * Begins a Stream Transaction. + * + * @param options Additional options, can be null + * @return information about the transaction + * @see API + * Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity beginStreamTransaction(StreamTransactionOptions options); + + /** + * Aborts a Stream Transaction. + * + * @return information about the transaction + * @see API + * Documentation + */ + StreamTransactionEntity abortStreamTransaction(String id); + + /** + * Gets information about a Stream Transaction. + * + * @return information about the transaction + * @see + * API Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity getStreamTransaction(String id); + + /** + * Gets all the currently running Stream Transactions. + * + * @return all the currently running Stream Transactions + * @see + * API Documentation + * @since ArangoDB 3.5.0 + */ + Collection getStreamTransactions(); + + /** + * Commits a Stream Transaction. + * + * @return information about the transaction + * @see + * API Documentation + * @since ArangoDB 3.5.0 + */ + StreamTransactionEntity commitStreamTransaction(String id); + + /** + * Retrieves information about the current database + * + * @return information about the current database + * @see API + * Documentation + */ + DatabaseEntity getInfo(); + + /** + * Reload the routing table. + * + * @see API + * Documentation + */ + void reloadRouting(); + + /** + * Fetches all views from the database and returns a list of view descriptions. 
+ * + * @return list of information about all views + * @see API Documentation + * @since ArangoDB 3.4.0 + */ + Collection getViews(); + + /** + * Returns a {@code ArangoView} instance for the given view name. + * + * @param name Name of the view + * @return view handler + * @since ArangoDB 3.4.0 + */ + ArangoView view(String name); + + /** + * Returns a {@link ArangoSearch} instance for the given view name. + * + * @param name Name of the view + * @return ArangoSearch view handler + * @since ArangoDB 3.4.0 + */ + ArangoSearch arangoSearch(String name); + + /** + * Returns a {@link SearchAlias} instance for the given view name. + * + * @param name Name of the view + * @return SearchAlias view handler + * @since ArangoDB 3.10 + */ + SearchAlias searchAlias(String name); + + /** + * Creates a view of the given {@code type}, then returns view information from the server. + * + * @param name The name of the view + * @param type The type of the view + * @return information about the view + * @since ArangoDB 3.4.0 + */ + ViewEntity createView(String name, ViewType type); + + /** + * Creates a ArangoSearch view with the given {@code options}, then returns view information from the server. + * + * @param name The name of the view + * @param options Additional options, can be null + * @return information about the view + * @see API + * Documentation + * @since ArangoDB 3.4.0 + */ + ViewEntity createArangoSearch(String name, ArangoSearchCreateOptions options); + + /** + * Creates a SearchAlias view with the given {@code options}, then returns view information from the server. 
+ * + * @param name The name of the view + * @param options Additional options, can be null + * @return information about the view + * @see API + * Documentation + * @since ArangoDB 3.10 + */ + ViewEntity createSearchAlias(String name, SearchAliasCreateOptions options); + + /** + * Creates an Analyzer + * + * @param analyzer SearchAnalyzer + * @return the created Analyzer + * @see API Documentation + * @since ArangoDB 3.5.0 + */ + SearchAnalyzer createSearchAnalyzer(SearchAnalyzer analyzer); + + /** + * Gets information about an Analyzer + * + * @param name of the Analyzer without database prefix + * @return information about an Analyzer + * @see API Documentation + * @since ArangoDB 3.5.0 + */ + SearchAnalyzer getSearchAnalyzer(String name); + + /** + * Retrieves all analyzers definitions. + * + * @return collection of all analyzers definitions + * @see API Documentation + * @since ArangoDB 3.5.0 + */ + Collection getSearchAnalyzers(); + + /** + * Deletes an Analyzer + * + * @param name of the Analyzer without database prefix + * @see API Documentation + * @since ArangoDB 3.5.0 + */ + void deleteSearchAnalyzer(String name); + + /** + * Deletes an Analyzer + * + * @param name of the Analyzer without database prefix + * @param options AnalyzerDeleteOptions + * @see API Documentation + * @since ArangoDB 3.5.0 + */ + void deleteSearchAnalyzer(String name, AnalyzerDeleteOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java new file mode 100644 index 000000000..41b2e34d6 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java @@ -0,0 +1,386 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer; +import com.arangodb.model.*; +import com.arangodb.model.arangosearch.AnalyzerDeleteOptions; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoDatabase} + */ +@ThreadSafe +public interface ArangoDatabaseAsync extends ArangoSerdeAccessor { + + /** + * @return main entry point for async API + */ + ArangoDBAsync arango(); + + /** + * @return database name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoDatabase#getVersion()} + */ + CompletableFuture getVersion(); + + /** + * Asynchronous version of {@link ArangoDatabase#getEngine()} + */ + CompletableFuture getEngine(); + + /** + * Asynchronous version of {@link ArangoDatabase#exists()} + */ + CompletableFuture exists(); + + /** + * Asynchronous version of {@link ArangoDatabase#getAccessibleDatabases()} + */ + CompletableFuture> getAccessibleDatabases(); + + /** + * Returns a {@code ArangoCollectionAsync} instance for the given collection name. 
+ * + * @param name Name of the collection + * @return collection handler + */ + ArangoCollectionAsync collection(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#createCollection(String)} + */ + CompletableFuture createCollection(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#createCollection(String, CollectionCreateOptions)} + */ + CompletableFuture createCollection(String name, CollectionCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#getCollections()} + */ + CompletableFuture> getCollections(); + + /** + * Asynchronous version of {@link ArangoDatabase#getCollections(CollectionsReadOptions)} + */ + CompletableFuture> getCollections(CollectionsReadOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#getIndex(String)} + */ + CompletableFuture getIndex(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#deleteIndex(String)} + */ + CompletableFuture deleteIndex(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#create()} + */ + CompletableFuture create(); + + /** + * Asynchronous version of {@link ArangoDatabase#drop()} + */ + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoDatabase#grantAccess(String, Permissions)} + */ + CompletableFuture grantAccess(String user, Permissions permissions); + + /** + * Asynchronous version of {@link ArangoDatabase#grantAccess(String)} + */ + CompletableFuture grantAccess(String user); + + /** + * Asynchronous version of {@link ArangoDatabase#revokeAccess(String)} + */ + CompletableFuture revokeAccess(String user); + + /** + * Asynchronous version of {@link ArangoDatabase#resetAccess(String)} + */ + CompletableFuture resetAccess(String user); + + /** + * Asynchronous version of {@link ArangoDatabase#grantDefaultCollectionAccess(String, Permissions)} + */ + CompletableFuture grantDefaultCollectionAccess(String user, Permissions permissions); + + /** + * Asynchronous 
version of {@link ArangoDatabase#getPermissions(String)} + */ + CompletableFuture getPermissions(String user); + + CompletableFuture> query(String query, Class type, Map bindVars, AqlQueryOptions options); + + CompletableFuture> query(String query, Class type, AqlQueryOptions options); + + CompletableFuture> query(String query, Class type, Map bindVars); + + CompletableFuture> query(String query, Class type); + + CompletableFuture> cursor(String cursorId, Class type); + + CompletableFuture> cursor(String cursorId, Class type, AqlQueryOptions options); + + CompletableFuture> cursor(String cursorId, Class type, String nextBatchId); + + CompletableFuture> cursor(String cursorId, Class type, String nextBatchId, AqlQueryOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#explainQuery(String, Map, AqlQueryExplainOptions)} + * + * @deprecated for removal, use {@link ArangoDatabaseAsync#explainAqlQuery(String, Map, AqlQueryExplainOptions)} instead + */ + @Deprecated + CompletableFuture explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, AqlQueryExplainOptions)} + * + * @deprecated for removal, use {@link ArangoDatabaseAsync#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ + @Deprecated + CompletableFuture explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} + */ + CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#parseQuery(String)} + */ + CompletableFuture parseQuery(String query); + + /** + * Asynchronous version of {@link ArangoDatabase#clearQueryCache()} + */ + CompletableFuture clearQueryCache(); + + /** + * Asynchronous version of {@link ArangoDatabase#getQueryCacheProperties()} + */ 
+ CompletableFuture getQueryCacheProperties(); + + /** + * Asynchronous version of {@link ArangoDatabase#setQueryCacheProperties(QueryCachePropertiesEntity)} + */ + CompletableFuture setQueryCacheProperties(QueryCachePropertiesEntity properties); + + /** + * Asynchronous version of {@link ArangoDatabase#getQueryTrackingProperties()} + */ + CompletableFuture getQueryTrackingProperties(); + + /** + * Asynchronous version of {@link ArangoDatabase#setQueryTrackingProperties(QueryTrackingPropertiesEntity)} + */ + CompletableFuture setQueryTrackingProperties(QueryTrackingPropertiesEntity properties); + + /** + * Asynchronous version of {@link ArangoDatabase#getCurrentlyRunningQueries()} + */ + CompletableFuture> getCurrentlyRunningQueries(); + + /** + * Asynchronous version of {@link ArangoDatabase#getSlowQueries()} + */ + CompletableFuture> getSlowQueries(); + + /** + * Asynchronous version of {@link ArangoDatabase#clearSlowQueries()} + */ + CompletableFuture clearSlowQueries(); + + /** + * Asynchronous version of {@link ArangoDatabase#killQuery(String)} + */ + CompletableFuture killQuery(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#createAqlFunction(String, String, AqlFunctionCreateOptions)} + */ + CompletableFuture createAqlFunction(String name, String code, AqlFunctionCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#deleteAqlFunction(String, AqlFunctionDeleteOptions)} + */ + CompletableFuture deleteAqlFunction(String name, AqlFunctionDeleteOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#getAqlFunctions(AqlFunctionGetOptions)} + */ + CompletableFuture> getAqlFunctions(AqlFunctionGetOptions options); + + /** + * Returns a {@code ArangoGraphAsync} instance for the given graph name. 
+ * + * @param name Name of the graph + * @return graph handler + */ + ArangoGraphAsync graph(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#createGraph(String, Iterable)} + */ + CompletableFuture createGraph(String name, Iterable edgeDefinitions); + + /** + * Asynchronous version of {@link ArangoDatabase#createGraph(String, Iterable, GraphCreateOptions)} + */ + CompletableFuture createGraph(String name, Iterable edgeDefinitions, GraphCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#getGraphs()} + */ + CompletableFuture> getGraphs(); + + /** + * Asynchronous version of {@link ArangoDatabase#transaction(String, Class, TransactionOptions)} + */ + CompletableFuture transaction(String action, Class type, TransactionOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#beginStreamTransaction(StreamTransactionOptions)} + */ + CompletableFuture beginStreamTransaction(StreamTransactionOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#abortStreamTransaction(String)} + */ + CompletableFuture abortStreamTransaction(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#getStreamTransaction(String)} + */ + CompletableFuture getStreamTransaction(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#getStreamTransactions()} + */ + CompletableFuture> getStreamTransactions(); + + /** + * Asynchronous version of {@link ArangoDatabase#commitStreamTransaction(String)} + */ + CompletableFuture commitStreamTransaction(String id); + + /** + * Asynchronous version of {@link ArangoDatabase#getInfo()} + */ + CompletableFuture getInfo(); + + /** + * Asynchronous version of {@link ArangoDatabase#reloadRouting()} + */ + CompletableFuture reloadRouting(); + + /** + * Asynchronous version of {@link ArangoDatabase#getViews()} + */ + CompletableFuture> getViews(); + + /** + * Returns a {@code ArangoViewAsync} instance for the given view name. 
+ * + * @param name Name of the view + * @return view handler + * @since ArangoDB 3.4.0 + */ + ArangoViewAsync view(String name); + + /** + * Returns a {@link ArangoSearchAsync} instance for the given view name. + * + * @param name Name of the view + * @return ArangoSearch view handler + * @since ArangoDB 3.4.0 + */ + ArangoSearchAsync arangoSearch(String name); + + /** + * Returns a {@link SearchAliasAsync} instance for the given view name. + * + * @param name Name of the view + * @return SearchAlias view handler + * @since ArangoDB 3.10 + */ + SearchAliasAsync searchAlias(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#createView(String, ViewType)} + */ + CompletableFuture createView(String name, ViewType type); + + /** + * Asynchronous version of {@link ArangoDatabase#createArangoSearch(String, ArangoSearchCreateOptions)} + */ + CompletableFuture createArangoSearch(String name, ArangoSearchCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#createSearchAlias(String, SearchAliasCreateOptions)} + */ + CompletableFuture createSearchAlias(String name, SearchAliasCreateOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#createSearchAnalyzer(SearchAnalyzer)} + */ + CompletableFuture createSearchAnalyzer(SearchAnalyzer analyzer); + + /** + * Asynchronous version of {@link ArangoDatabase#getSearchAnalyzer(String)} + */ + CompletableFuture getSearchAnalyzer(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#getSearchAnalyzers()} + */ + CompletableFuture> getSearchAnalyzers(); + + /** + * Asynchronous version of {@link ArangoDatabase#deleteSearchAnalyzer(String)} + */ + CompletableFuture deleteSearchAnalyzer(String name); + + /** + * Asynchronous version of {@link ArangoDatabase#deleteSearchAnalyzer(String, AnalyzerDeleteOptions)} + */ + CompletableFuture deleteSearchAnalyzer(String name, AnalyzerDeleteOptions options); + +} diff --git 
a/core/src/main/java/com/arangodb/ArangoEdgeCollection.java b/core/src/main/java/com/arangodb/ArangoEdgeCollection.java new file mode 100644 index 000000000..48f26e95e --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoEdgeCollection.java @@ -0,0 +1,201 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.EdgeEntity; +import com.arangodb.entity.EdgeUpdateEntity; +import com.arangodb.model.*; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for operations on ArangoDB edge collection level. + * + * @author Mark Vollmary + * @see API Documentation + */ +@ThreadSafe +public interface ArangoEdgeCollection extends ArangoSerdeAccessor { + + /** + * The the handler of the named graph the edge collection is within + * + * @return graph handler + */ + ArangoGraph graph(); + + /** + * The name of the edge collection + * + * @return collection name + */ + String name(); + + /** + * Remove one edge definition from the graph. + * + * @see API + * Documentation + * + * @deprecated use {@link #remove()} instead + */ + @Deprecated + void drop(); + + /** + * Remove one edge definition from the graph. 
+ * + * @param options options + * @see API + * Documentation + * + * @deprecated use {@link #remove(EdgeCollectionRemoveOptions)} instead + */ + @Deprecated + void drop(EdgeCollectionDropOptions options); + + /** + * Remove one edge definition from the graph. + * + * @see API + * Documentation + */ + void remove(); + + /** + * Remove one edge definition from the graph. + * + * @param options options + * @see API + * Documentation + */ + void remove(EdgeCollectionRemoveOptions options); + + /** + * Creates a new edge in the collection + * + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * @return information about the edge + * @see API Documentation + */ + EdgeEntity insertEdge(Object value); + + /** + * Creates a new edge in the collection + * + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the edge + * @see API Documentation + */ + EdgeEntity insertEdge(Object value, EdgeCreateOptions options); + + /** + * Fetches an existing edge + * + * @param key The key of the edge + * @param type The type of the edge-document (POJO or {@link com.arangodb.util.RawData}) + * @return the edge identified by the key + * @see API Documentation + */ + T getEdge(String key, Class type); + + /** + * Fetches an existing edge + * + * @param key The key of the edge + * @param type The type of the edge-document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return the edge identified by the key + * @see API Documentation + */ + T getEdge(String key, Class type, GraphDocumentReadOptions options); + + /** + * Replaces the edge with key with the one in the body, provided there is such a edge and no precondition is + * violated + * + * @param key The key of the edge + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * 
@return information about the edge + * @see API Documentation + */ + EdgeUpdateEntity replaceEdge(String key, Object value); + + /** + * Replaces the edge with key with the one in the body, provided there is such a edge and no precondition is + * violated + * + * @param key The key of the edge + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the edge + * @see API Documentation + */ + EdgeUpdateEntity replaceEdge(String key, Object value, EdgeReplaceOptions options); + + /** + * Partially updates the edge identified by document-key. The value must contain a document with the attributes to + * patch (the patch document). All attributes from the patch document will be added to the existing document if they + * do not yet exist, and overwritten in the existing document if they do exist there. + * + * @param key The key of the edge + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * @return information about the edge + * @see API Documentation + */ + EdgeUpdateEntity updateEdge(String key, Object value); + + /** + * Partially updates the edge identified by document-key. The value must contain a document with the attributes to + * patch (the patch document). All attributes from the patch document will be added to the existing document if they + * do not yet exist, and overwritten in the existing document if they do exist there. 
+ * + * @param key The key of the edge + * @param value A representation of a single edge (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the edge + * @see API Documentation + */ + EdgeUpdateEntity updateEdge(String key, Object value, EdgeUpdateOptions options); + + /** + * Removes a edge + * + * @param key The key of the edge + * @see API Documentation + */ + void deleteEdge(String key); + + /** + * Removes a edge + * + * @param key The key of the edge + * @param options Additional options, can be null + * @see API Documentation + */ + void deleteEdge(String key, EdgeDeleteOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoEdgeCollectionAsync.java b/core/src/main/java/com/arangodb/ArangoEdgeCollectionAsync.java new file mode 100644 index 000000000..b1509f429 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoEdgeCollectionAsync.java @@ -0,0 +1,126 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.EdgeEntity; +import com.arangodb.entity.EdgeUpdateEntity; +import com.arangodb.model.*; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoEdgeCollection} + */ +@ThreadSafe +public interface ArangoEdgeCollectionAsync extends ArangoSerdeAccessor { + + /** + * The the handler of the named graph the edge collection is within + * + * @return graph handler + */ + ArangoGraphAsync graph(); + + /** + * The name of the edge collection + * + * @return collection name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#drop()} + * + * @deprecated use {@link #remove()} instead + */ + @Deprecated + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#drop(EdgeCollectionDropOptions)} + * + * @deprecated use {@link #remove(EdgeCollectionRemoveOptions)} instead + */ + @Deprecated + CompletableFuture drop(EdgeCollectionDropOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#remove()} + */ + CompletableFuture remove(); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#remove(EdgeCollectionRemoveOptions)} + */ + CompletableFuture remove(EdgeCollectionRemoveOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#insertEdge(Object)} + */ + CompletableFuture insertEdge(Object value); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#insertEdge(Object, EdgeCreateOptions)} + */ + CompletableFuture insertEdge(Object value, EdgeCreateOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#getEdge(String, Class)} + */ + CompletableFuture getEdge(String key, Class type); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#getEdge(String, Class, GraphDocumentReadOptions)} + */ + 
CompletableFuture getEdge(String key, Class type, GraphDocumentReadOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#replaceEdge(String, Object)} + */ + CompletableFuture replaceEdge(String key, Object value); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#replaceEdge(String, Object, EdgeReplaceOptions)} + */ + CompletableFuture replaceEdge(String key, Object value, EdgeReplaceOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#updateEdge(String, Object)} + */ + CompletableFuture updateEdge(String key, Object value); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#updateEdge(String, Object, EdgeUpdateOptions)} + */ + CompletableFuture updateEdge(String key, Object value, EdgeUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#deleteEdge(String)} + */ + CompletableFuture deleteEdge(String key); + + /** + * Asynchronous version of {@link ArangoEdgeCollection#deleteEdge(String, EdgeDeleteOptions)} + */ + CompletableFuture deleteEdge(String key, EdgeDeleteOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoGraph.java b/core/src/main/java/com/arangodb/ArangoGraph.java new file mode 100644 index 000000000..444f70d23 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoGraph.java @@ -0,0 +1,206 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.model.GraphCreateOptions; +import com.arangodb.model.ReplaceEdgeDefinitionOptions; +import com.arangodb.model.VertexCollectionCreateOptions; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; + +/** + * Interface for operations on ArangoDB graph level. + * + * @author Mark Vollmary + * @see API Documentation + */ +@ThreadSafe +public interface ArangoGraph extends ArangoSerdeAccessor { + + /** + * The the handler of the database the named graph is within + * + * @return database handler + */ + ArangoDatabase db(); + + /** + * The name of the collection + * + * @return collection name + */ + String name(); + + /** + * Checks whether the graph exists + * + * @return true if the graph exists, otherwise false + * @see + * API Documentation + */ + boolean exists(); + + /** + * Creates the graph in the graph module. The creation of a graph requires the name of the graph and a definition of + * its edges. + * + * @param edgeDefinitions An array of definitions for the edge + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity create(Iterable edgeDefinitions); + + /** + * Creates the graph in the graph module. The creation of a graph requires the name of the graph and a definition of + * its edges. + * + * @param edgeDefinitions An array of definitions for the edge + * @param options Additional options, can be null + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity create(Iterable edgeDefinitions, GraphCreateOptions options); + + /** + * Deletes the graph from the database. 
+ * + * @see + * API Documentation + */ + void drop(); + + /** + * Deletes the graph from the database. + * + * @param dropCollections Drop collections of this graph as well. Collections will only be + * dropped if they are not used in other graphs. + * @see API + * Documentation + */ + void drop(boolean dropCollections); + + /** + * Retrieves general information about the graph. + * + * @return the definition content of this graph + * @see + * API Documentation + */ + GraphEntity getInfo(); + + /** + * Fetches all vertex collections from the graph and returns a list of collection names. + * + * @return all vertex collections within this graph + * @see API + * Documentation + */ + Collection getVertexCollections(); + + /** + * Adds a vertex collection to the set of collections of the graph. If the collection does not exist, it will be + * created. + * + * @param name Name of the vertex collection + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity addVertexCollection(String name); + + /** + * Adds a vertex collection to the set of collections of the graph. If the collection does not exist, it will be + * created. + * + * @param name Name of the vertex collection + * @param options additional options + * @return information about the graph + * @see API + * Documentation + * @since ArangoDB 3.9 + */ + GraphEntity addVertexCollection(String name, VertexCollectionCreateOptions options); + + /** + * Returns a {@code ArangoVertexCollection} instance for the given vertex collection name. + * + * @param name Name of the vertex collection + * @return collection handler + */ + ArangoVertexCollection vertexCollection(String name); + + /** + * Returns a {@code ArangoEdgeCollection} instance for the given edge collection name. 
+ * + * @param name Name of the edge collection + * @return collection handler + */ + ArangoEdgeCollection edgeCollection(String name); + + /** + * Fetches all edge collections from the graph and returns a list of collection names. + * + * @return all edge collections within this graph + * @see API + * Documentation + */ + Collection getEdgeDefinitions(); + + /** + * Adds the given edge definition to the graph. + * + * @param definition The edge definition + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity addEdgeDefinition(EdgeDefinition definition); + + /** + * Change one specific edge definition. This will modify all occurrences of this definition in all graphs known to + * your database + * + * @param definition The edge definition + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity replaceEdgeDefinition(EdgeDefinition definition); + + /** + * Change one specific edge definition. This will modify all occurrences of this definition in all graphs known to + * your database + * + * @param definition The edge definition + * @param options options + * @return information about the graph + * @see API + * Documentation + */ + GraphEntity replaceEdgeDefinition(EdgeDefinition definition, ReplaceEdgeDefinitionOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoGraphAsync.java b/core/src/main/java/com/arangodb/ArangoGraphAsync.java new file mode 100644 index 000000000..2232c64ad --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoGraphAsync.java @@ -0,0 +1,130 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.model.GraphCreateOptions; +import com.arangodb.model.ReplaceEdgeDefinitionOptions; +import com.arangodb.model.VertexCollectionCreateOptions; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoGraph} + */ +@ThreadSafe +public interface ArangoGraphAsync extends ArangoSerdeAccessor { + + /** + * @return database async API + */ + ArangoDatabaseAsync db(); + + /** + * @return graph name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoGraph#exists()} + */ + CompletableFuture exists(); + + /** + * Asynchronous version of {@link ArangoGraph#create(Iterable)} + */ + CompletableFuture create(Iterable edgeDefinitions); + + /** + * Asynchronous version of {@link ArangoGraph#create(Iterable, GraphCreateOptions)} + */ + CompletableFuture create(Iterable edgeDefinitions, GraphCreateOptions options); + + /** + * Asynchronous version of {@link ArangoGraph#drop()} + */ + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoGraph#drop(boolean)} + */ + CompletableFuture drop(boolean dropCollections); + + /** + * Asynchronous version of {@link ArangoGraph#getInfo()} + */ + CompletableFuture getInfo(); + + /** + * Asynchronous version of {@link ArangoGraph#getVertexCollections()} + */ + CompletableFuture> 
getVertexCollections(); + + /** + * Asynchronous version of {@link ArangoGraph#addVertexCollection(String)} + */ + CompletableFuture addVertexCollection(String name); + + /** + * Asynchronous version of {@link ArangoGraph#addVertexCollection(String, VertexCollectionCreateOptions)} + */ + CompletableFuture addVertexCollection(String name, VertexCollectionCreateOptions options); + + /** + * Returns a {@code ArangoVertexCollectionAsync} instance for the given vertex collection name. + * + * @param name Name of the vertex collection + * @return collection handler + */ + ArangoVertexCollectionAsync vertexCollection(String name); + + /** + * Returns a {@code ArangoEdgeCollectionAsync} instance for the given edge collection name. + * + * @param name Name of the edge collection + * @return collection handler + */ + ArangoEdgeCollectionAsync edgeCollection(String name); + + /** + * Asynchronous version of {@link ArangoGraph#getEdgeDefinitions()} + */ + CompletableFuture> getEdgeDefinitions(); + + /** + * Asynchronous version of {@link ArangoGraph#addEdgeDefinition(EdgeDefinition)} + */ + CompletableFuture addEdgeDefinition(EdgeDefinition definition); + + /** + * Asynchronous version of {@link ArangoGraph#replaceEdgeDefinition(EdgeDefinition)} + */ + CompletableFuture replaceEdgeDefinition(EdgeDefinition definition); + + /** + * Asynchronous version of {@link ArangoGraph#replaceEdgeDefinition(EdgeDefinition, ReplaceEdgeDefinitionOptions)} + */ + CompletableFuture replaceEdgeDefinition(EdgeDefinition definition, ReplaceEdgeDefinitionOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoIterable.java b/core/src/main/java/com/arangodb/ArangoIterable.java new file mode 100644 index 000000000..8562e5f94 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoIterable.java @@ -0,0 +1,38 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * @author Mark Vollmary + */ +public interface ArangoIterable extends Iterable { + + @Override + ArangoIterator iterator(); + + default Stream stream() { + return StreamSupport.stream(spliterator(), false); + } + +} diff --git a/src/main/java/com/arangodb/util/ArangoSerialization.java b/core/src/main/java/com/arangodb/ArangoIterator.java similarity index 77% rename from src/main/java/com/arangodb/util/ArangoSerialization.java rename to core/src/main/java/com/arangodb/ArangoIterator.java index 06314af72..26fd8ae46 100644 --- a/src/main/java/com/arangodb/util/ArangoSerialization.java +++ b/core/src/main/java/com/arangodb/ArangoIterator.java @@ -1,29 +1,30 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -/** - * @author Mark Vollmary - * - */ -public interface ArangoSerialization extends ArangoSerializer, ArangoDeserializer { - -} +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import java.util.Iterator; + +/** + * @author Mark Vollmary + */ +public interface ArangoIterator extends Iterator { + +} diff --git a/core/src/main/java/com/arangodb/ArangoMetrics.java b/core/src/main/java/com/arangodb/ArangoMetrics.java new file mode 100644 index 000000000..38253fac4 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoMetrics.java @@ -0,0 +1,37 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for accessing metrics. + * + * @author Michele Rastelli + * @since ArangoDB 3.9 + */ +@ThreadSafe +public interface ArangoMetrics { + /** + * @return queue time metrics + */ + QueueTimeMetrics getQueueTime(); +} diff --git a/core/src/main/java/com/arangodb/ArangoSearch.java b/core/src/main/java/com/arangodb/ArangoSearch.java new file mode 100644 index 000000000..95d7a604e --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoSearch.java @@ -0,0 +1,90 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for operations on ArangoDB view level for ArangoSearch views. + * + * @author Mark Vollmary + * @see View API Documentation + * @since ArangoDB 3.4.0 + */ +@ThreadSafe +public interface ArangoSearch extends ArangoView { + + /** + * Creates a view, then returns view information from the server. 
+ * + * @return information about the view + * @see API + * Documentation + */ + ViewEntity create(); + + /** + * Creates a view with the given {@code options}, then returns view information from the server. + * + * @param options Additional options, can be null + * @return information about the view + * @see API + * Documentation + */ + ViewEntity create(ArangoSearchCreateOptions options); + + /** + * Reads the properties of the specified view. + * + * @return properties of the view + * @see API + * Documentation + */ + ArangoSearchPropertiesEntity getProperties(); + + /** + * Partially changes properties of the view. + * + * @param options properties to change + * @return properties of the view + * @see API + * Documentation + */ + ArangoSearchPropertiesEntity updateProperties(ArangoSearchPropertiesOptions options); + + /** + * Changes properties of the view. + * + * @param options properties to change + * @return properties of the view + * @see API + * Documentation + */ + ArangoSearchPropertiesEntity replaceProperties(ArangoSearchPropertiesOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoSearchAsync.java b/core/src/main/java/com/arangodb/ArangoSearchAsync.java new file mode 100644 index 000000000..888b5e893 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoSearchAsync.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoSearch} + */ +@ThreadSafe +public interface ArangoSearchAsync extends ArangoViewAsync { + + /** + * Asynchronous version of {@link ArangoSearch#create()} + */ + CompletableFuture create(); + + /** + * Asynchronous version of {@link ArangoSearch#create(ArangoSearchCreateOptions)} + */ + CompletableFuture create(ArangoSearchCreateOptions options); + + /** + * Asynchronous version of {@link ArangoSearch#getProperties()} + */ + CompletableFuture getProperties(); + + /** + * Asynchronous version of {@link ArangoSearch#updateProperties(ArangoSearchPropertiesOptions)} + */ + CompletableFuture updateProperties(ArangoSearchPropertiesOptions options); + + /** + * Asynchronous version of {@link ArangoSearch#replaceProperties(ArangoSearchPropertiesOptions)} + */ + CompletableFuture replaceProperties(ArangoSearchPropertiesOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoSerdeAccessor.java b/core/src/main/java/com/arangodb/ArangoSerdeAccessor.java new file mode 100644 index 000000000..10d04d394 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoSerdeAccessor.java @@ -0,0 +1,42 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.serde.InternalSerde; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * @author Mark Vollmary + */ +@ThreadSafe +public interface ArangoSerdeAccessor { + + /** + * Returns driver internal serialization implementation for serializing and deserializing driver's classes. + * + * @return ArangoSerde + */ + @UnstableApi + InternalSerde getSerde(); + +} diff --git a/core/src/main/java/com/arangodb/ArangoVertexCollection.java b/core/src/main/java/com/arangodb/ArangoVertexCollection.java new file mode 100644 index 000000000..5b9adaee5 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoVertexCollection.java @@ -0,0 +1,205 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for operations on ArangoDB vertex collection level. + * + * @author Mark Vollmary + * @see API Documentation + */ +@ThreadSafe +public interface ArangoVertexCollection extends ArangoSerdeAccessor { + + /** + * The handler of the named graph the edge collection is within + * + * @return graph handler + */ + ArangoGraph graph(); + + /** + * The name of the edge collection + * + * @return collection name + */ + String name(); + + /** + * Remove a vertex collection form the graph. + * + * @see API + * Documentation + * + * @deprecated use {@link #remove()} instead + */ + @Deprecated + void drop(); + + /** + * Remove a vertex collection form the graph. + * + * @param options options + * @see API + * Documentation + * + * @deprecated use {@link #remove(VertexCollectionRemoveOptions)} instead + */ + @Deprecated + void drop(VertexCollectionDropOptions options); + + /** + * Remove a vertex collection form the graph. + * + * @see API + * Documentation + */ + void remove(); + + /** + * Remove a vertex collection form the graph. 
+ * + * @param options options + * @see API + * Documentation + */ + void remove(VertexCollectionRemoveOptions options); + + /** + * Creates a new vertex in the collection + * + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @return information about the vertex + * @see + * API Documentation + */ + VertexEntity insertVertex(Object value); + + /** + * Creates a new vertex in the collection + * + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the vertex + * @see + * API Documentation + */ + VertexEntity insertVertex(Object value, VertexCreateOptions options); + + /** + * Retrieves the vertex document with the given {@code key} from the collection. + * + * @param key The key of the vertex + * @param type The type of the vertex-document (POJO or {@link com.arangodb.util.RawData}) + * @return the vertex identified by the key + * @see API Documentation + */ + T getVertex(String key, Class type); + + /** + * Retrieves the vertex document with the given {@code key} from the collection. 
+ * + * @param key The key of the vertex + * @param type The type of the vertex-document (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return the vertex identified by the key + * @see API Documentation + */ + T getVertex(String key, Class type, GraphDocumentReadOptions options); + + /** + * Replaces the vertex with key with the one in the body, provided there is such a vertex and no precondition is + * violated + * + * @param key The key of the vertex + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @return information about the vertex + * @see API + * Documentation + */ + VertexUpdateEntity replaceVertex(String key, Object value); + + /** + * Replaces the vertex with key with the one in the body, provided there is such a vertex and no precondition is + * violated + * + * @param key The key of the vertex + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the vertex + * @see API + * Documentation + */ + VertexUpdateEntity replaceVertex(String key, Object value, VertexReplaceOptions options); + + /** + * Partially updates the vertex identified by document-key. The value must contain a document with the attributes to + * patch (the patch document). All attributes from the patch document will be added to the existing document if they + * do not yet exist, and overwritten in the existing document if they do exist there. + * + * @param key The key of the vertex + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @return information about the vertex + * @see + * API Documentation + */ + VertexUpdateEntity updateVertex(String key, Object value); + + /** + * Partially updates the vertex identified by document-key. 
The value must contain a document with the attributes to + * patch (the patch document). All attributes from the patch document will be added to the existing document if they + * do not yet exist, and overwritten in the existing document if they do exist there. + * + * @param key The key of the vertex + * @param value A representation of a single vertex (POJO or {@link com.arangodb.util.RawData}) + * @param options Additional options, can be null + * @return information about the vertex + * @see + * API Documentation + */ + VertexUpdateEntity updateVertex(String key, Object value, VertexUpdateOptions options); + + /** + * Deletes the vertex with the given {@code key} from the collection. + * + * @param key The key of the vertex + * @see + * API Documentation + */ + void deleteVertex(String key); + + /** + * Deletes the vertex with the given {@code key} from the collection. + * + * @param key The key of the vertex + * @param options Additional options, can be null + * @see + * API Documentation + */ + void deleteVertex(String key, VertexDeleteOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoVertexCollectionAsync.java b/core/src/main/java/com/arangodb/ArangoVertexCollectionAsync.java new file mode 100644 index 000000000..65ece4dcb --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoVertexCollectionAsync.java @@ -0,0 +1,126 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoVertexCollection} + */ +@ThreadSafe +public interface ArangoVertexCollectionAsync extends ArangoSerdeAccessor { + + /** + * The the handler of the named graph the edge collection is within + * + * @return graph handler + */ + ArangoGraphAsync graph(); + + /** + * The name of the edge collection + * + * @return collection name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoVertexCollection#drop()} + * + * @deprecated use {@link #remove()} instead + */ + @Deprecated + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoVertexCollection#drop(VertexCollectionDropOptions)} + * + * @deprecated use {@link #remove(VertexCollectionRemoveOptions)} instead + */ + @Deprecated + CompletableFuture drop(VertexCollectionDropOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#remove()} + */ + CompletableFuture remove(); + + /** + * Asynchronous version of {@link ArangoVertexCollection#remove(VertexCollectionRemoveOptions)} + */ + CompletableFuture remove(VertexCollectionRemoveOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#insertVertex(Object)} + */ + CompletableFuture insertVertex(Object value); + + /** + * Asynchronous version of {@link ArangoVertexCollection#insertVertex(Object, VertexCreateOptions)} + */ + CompletableFuture insertVertex(Object value, VertexCreateOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#getVertex(String, Class)} + */ + CompletableFuture 
getVertex(String key, Class type); + + /** + * Asynchronous version of {@link ArangoVertexCollection#getVertex(String, Class, GraphDocumentReadOptions)} + */ + CompletableFuture getVertex(String key, Class type, GraphDocumentReadOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#replaceVertex(String, Object)} + */ + CompletableFuture replaceVertex(String key, Object value); + + /** + * Asynchronous version of {@link ArangoVertexCollection#replaceVertex(String, Object, VertexReplaceOptions)} + */ + CompletableFuture replaceVertex(String key, Object value, VertexReplaceOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#updateVertex(String, Object)} + */ + CompletableFuture updateVertex(String key, Object value); + + /** + * Asynchronous version of {@link ArangoVertexCollection#updateVertex(String, Object, VertexUpdateOptions)} + */ + CompletableFuture updateVertex(String key, Object value, VertexUpdateOptions options); + + /** + * Asynchronous version of {@link ArangoVertexCollection#deleteVertex(String)} + */ + CompletableFuture deleteVertex(String key); + + /** + * Asynchronous version of {@link ArangoVertexCollection#deleteVertex(String, VertexDeleteOptions)} + */ + CompletableFuture deleteVertex(String key, VertexDeleteOptions options); + +} diff --git a/core/src/main/java/com/arangodb/ArangoView.java b/core/src/main/java/com/arangodb/ArangoView.java new file mode 100644 index 000000000..209f6f8e5 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoView.java @@ -0,0 +1,89 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for operations on ArangoDB view level. + * + * @author Mark Vollmary + * @see View API Documentation + * @since ArangoDB 3.4.0 + */ +@ThreadSafe +public interface ArangoView extends ArangoSerdeAccessor { + + /** + * The the handler of the database the collection is within + * + * @return database handler + */ + ArangoDatabase db(); + + /** + * The name of the view + * + * @return view name + */ + String name(); + + /** + * Checks whether the view exists. + * + * @return true if the view exists, otherwise false + * @see + * API + * Documentation + */ + boolean exists(); + + /** + * Deletes the view from the database. + * + * @see + * API Documentation + */ + void drop(); + + /** + * Renames the view. + * + * @param newName The new name + * @return information about the view + * @see + * API Documentation + */ + ViewEntity rename(String newName); + + /** + * Returns information about the view. 
+ * + * @return information about the view + * @see + * API + * Documentation + */ + ViewEntity getInfo(); + +} diff --git a/core/src/main/java/com/arangodb/ArangoViewAsync.java b/core/src/main/java/com/arangodb/ArangoViewAsync.java new file mode 100644 index 000000000..57cf7f2a8 --- /dev/null +++ b/core/src/main/java/com/arangodb/ArangoViewAsync.java @@ -0,0 +1,68 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link ArangoView} + */ +@ThreadSafe +public interface ArangoViewAsync extends ArangoSerdeAccessor { + + /** + * The handler of the database the collection is within + * + * @return database handler + */ + ArangoDatabaseAsync db(); + + /** + * The name of the view + * + * @return view name + */ + String name(); + + /** + * Asynchronous version of {@link ArangoView#exists()} + */ + CompletableFuture exists(); + + /** + * Asynchronous version of {@link ArangoView#drop()} + */ + CompletableFuture drop(); + + /** + * Asynchronous version of {@link ArangoView#rename(String)} + */ + CompletableFuture rename(String newName); + + /** + * Asynchronous version of {@link ArangoView#getInfo()} + */ + CompletableFuture getInfo(); + +} diff --git a/core/src/main/java/com/arangodb/BaseArangoCursor.java b/core/src/main/java/com/arangodb/BaseArangoCursor.java new file mode 100644 index 000000000..6aeaaadd7 --- /dev/null +++ b/core/src/main/java/com/arangodb/BaseArangoCursor.java @@ -0,0 +1,23 @@ +package com.arangodb; + +import com.arangodb.entity.CursorEntity; + +import java.util.List; + +public interface BaseArangoCursor { + String getId(); + + Integer getCount(); + + Boolean isCached(); + + Boolean hasMore(); + + List getResult(); + + Boolean isPotentialDirtyRead(); + + String getNextBatchId(); + + CursorEntity.Extras getExtra(); +} diff --git a/core/src/main/java/com/arangodb/Compression.java b/core/src/main/java/com/arangodb/Compression.java new file mode 100644 index 000000000..d18d13d60 --- /dev/null +++ b/core/src/main/java/com/arangodb/Compression.java @@ -0,0 +1,7 @@ +package com.arangodb; + +public enum Compression { + NONE, + DEFLATE, + GZIP +} diff --git a/src/main/java/com/arangodb/Protocol.java 
b/core/src/main/java/com/arangodb/ContentType.java similarity index 85% rename from src/main/java/com/arangodb/Protocol.java rename to core/src/main/java/com/arangodb/ContentType.java index 2a02c3825..ec63aaf8d 100644 --- a/src/main/java/com/arangodb/Protocol.java +++ b/core/src/main/java/com/arangodb/ContentType.java @@ -1,29 +1,24 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -/** - * @author Mark Vollmary - * - */ -public enum Protocol { - VST, HTTP_JSON, HTTP_VPACK -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ +package com.arangodb; + +public enum ContentType { + JSON, VPACK +} diff --git a/core/src/main/java/com/arangodb/PackageVersion.java.in b/core/src/main/java/com/arangodb/PackageVersion.java.in new file mode 100644 index 000000000..47991a195 --- /dev/null +++ b/core/src/main/java/com/arangodb/PackageVersion.java.in @@ -0,0 +1,18 @@ +package com.arangodb; + +/** + * Automatically generated from PackageVersion.java.in by replacer plugin. + */ +public final class PackageVersion { + public final static boolean SHADED = isShaded(); + public final static String VERSION = "@project.version@" + (isShaded() ? "-shaded" : ""); + + private static boolean isShaded() { + try { + Class.forName("com.arangodb.shaded.fasterxml.jackson.core.JsonFactory"); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } +} diff --git a/core/src/main/java/com/arangodb/Protocol.java b/core/src/main/java/com/arangodb/Protocol.java new file mode 100644 index 000000000..1ca4bb4ea --- /dev/null +++ b/core/src/main/java/com/arangodb/Protocol.java @@ -0,0 +1,59 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +/** + * @author Mark Vollmary + */ +public enum Protocol { + + /** + * VelocyStream + * + * @see VelocyStream specification + */ + VST, + + /** + * HTTP 1.1 with JSON body + */ + HTTP_JSON, + + /** + * HTTP 1.1 with VelocyPack body + * + * @see VelocyPack specification + */ + HTTP_VPACK, + + /** + * HTTP 2 with JSON body + */ + HTTP2_JSON, + + /** + * HTTP 2 with VelocyPack body + * + * @see VelocyPack specification + */ + HTTP2_VPACK, + +} diff --git a/core/src/main/java/com/arangodb/QueueTimeMetrics.java b/core/src/main/java/com/arangodb/QueueTimeMetrics.java new file mode 100644 index 000000000..328568922 --- /dev/null +++ b/core/src/main/java/com/arangodb/QueueTimeMetrics.java @@ -0,0 +1,48 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.model.QueueTimeSample; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * Interface for accessing queue time latency metrics, reported by the "X-Arango-Queue-Time-Seconds" response header. + * This header contains the most recent request (de)queuing time (in seconds) as tracked by the server’s scheduler. 
+ * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.9 + */ +@ThreadSafe +public interface QueueTimeMetrics { + + /** + * @return all the n values observed + */ + QueueTimeSample[] getValues(); + + /** + * @return the average of the last n values observed, 0.0 if no value has been observed (i.e. in ArangoDB versions + * prior to 3.9). + */ + double getAvg(); +} diff --git a/core/src/main/java/com/arangodb/Request.java b/core/src/main/java/com/arangodb/Request.java new file mode 100644 index 000000000..f2a1acbe6 --- /dev/null +++ b/core/src/main/java/com/arangodb/Request.java @@ -0,0 +1,130 @@ +package com.arangodb; + +import java.util.HashMap; +import java.util.Map; + +public final class Request { + private final String db; + private final Method method; + private final String path; + private final Map queryParams; + private final Map headers; + private final T body; + + public enum Method { + DELETE, + GET, + POST, + PUT, + HEAD, + PATCH, + OPTIONS + } + + public static Builder builder() { + return new Builder<>(); + } + + private Request(String db, Method method, String path, Map queryParams, Map headers, T body) { + this.db = db; + this.method = method; + this.path = path; + this.queryParams = queryParams; + this.headers = headers; + this.body = body; + } + + public String getDb() { + return db; + } + + public Method getMethod() { + return method; + } + + public String getPath() { + return path; + } + + public Map getQueryParams() { + return queryParams; + } + + public Map getHeaders() { + return headers; + } + + public T getBody() { + return body; + } + + public static final class Builder { + private String db; + private Request.Method method; + private String path; + private final Map queryParams; + private final Map headers; + private T body; + + public Builder() { + queryParams = new HashMap<>(); + headers = new HashMap<>(); + } + + public Builder db(String db) { + this.db = db; + return this; + } + + public Builder 
method(Request.Method method) { + this.method = method; + return this; + } + + public Builder path(String path) { + this.path = path; + return this; + } + + public Builder queryParam(final String key, final String value) { + if (value != null) { + queryParams.put(key, value); + } + return this; + } + + public Builder queryParams(Map queryParams) { + if (queryParams != null) { + for (Map.Entry it : queryParams.entrySet()) { + queryParam(it.getKey(), it.getValue()); + } + } + return this; + } + + public Builder header(final String key, final String value) { + if (value != null) { + headers.put(key, value); + } + return this; + } + + public Builder headers(Map headers) { + if (headers != null) { + for (Map.Entry it : headers.entrySet()) { + header(it.getKey(), it.getValue()); + } + } + return this; + } + + public Builder body(T body) { + this.body = body; + return this; + } + + public Request build() { + return new Request<>(db, method, path, queryParams, headers, body); + } + } +} diff --git a/core/src/main/java/com/arangodb/RequestContext.java b/core/src/main/java/com/arangodb/RequestContext.java new file mode 100644 index 000000000..0ef8b61dc --- /dev/null +++ b/core/src/main/java/com/arangodb/RequestContext.java @@ -0,0 +1,18 @@ +package com.arangodb; + +import com.arangodb.internal.RequestContextImpl; + +import java.util.Optional; + +/** + * Context holding information about the current request and response. 
+ */ +public interface RequestContext { + + RequestContext EMPTY = new RequestContextImpl(); + + /** + * @return the stream transaction id of the request (if any) or {@code null} + */ + Optional getStreamTransactionId(); +} diff --git a/core/src/main/java/com/arangodb/Response.java b/core/src/main/java/com/arangodb/Response.java new file mode 100644 index 000000000..49c37698b --- /dev/null +++ b/core/src/main/java/com/arangodb/Response.java @@ -0,0 +1,27 @@ +package com.arangodb; + +import java.util.Map; + +public final class Response { + private final int responseCode; + private final Map headers; + private final T body; + + public Response(int responseCode, Map headers, T body) { + this.responseCode = responseCode; + this.headers = headers; + this.body = body; + } + + public int getResponseCode() { + return responseCode; + } + + public Map getHeaders() { + return headers; + } + + public T getBody() { + return body; + } +} diff --git a/core/src/main/java/com/arangodb/SearchAlias.java b/core/src/main/java/com/arangodb/SearchAlias.java new file mode 100644 index 000000000..a0b47a442 --- /dev/null +++ b/core/src/main/java/com/arangodb/SearchAlias.java @@ -0,0 +1,87 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasPropertiesOptions; + +/** + * Interface for operations on ArangoDB view level for SearchAlias views. + * + * @author Michele Rastelli + * @see View API Documentation + * @since ArangoDB 3.10 + */ +public interface SearchAlias extends ArangoView { + + /** + * Creates a view, then returns view information from the server. + * + * @return information about the view + * @see API + * Documentation + */ + ViewEntity create(); + + /** + * Creates a view with the given {@code options}, then returns view information from the server. + * + * @param options Additional options, can be null + * @return information about the view + * @see API + * Documentation + */ + ViewEntity create(SearchAliasCreateOptions options); + + /** + * Reads the properties of the specified view. + * + * @return properties of the view + * @see API + * Documentation + */ + SearchAliasPropertiesEntity getProperties(); + + /** + * Partially changes properties of the view. + * + * @param options properties to change + * @return properties of the view + * @see API + * Documentation + */ + SearchAliasPropertiesEntity updateProperties(SearchAliasPropertiesOptions options); + + /** + * Changes properties of the view. 
+ * + * @param options properties to change + * @return properties of the view + * @see API + * Documentation + */ + SearchAliasPropertiesEntity replaceProperties(SearchAliasPropertiesOptions options); + +} diff --git a/core/src/main/java/com/arangodb/SearchAliasAsync.java b/core/src/main/java/com/arangodb/SearchAliasAsync.java new file mode 100644 index 000000000..22ea9280c --- /dev/null +++ b/core/src/main/java/com/arangodb/SearchAliasAsync.java @@ -0,0 +1,60 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasPropertiesOptions; + +import java.util.concurrent.CompletableFuture; + +/** + * Asynchronous version of {@link SearchAlias} + */ +public interface SearchAliasAsync extends ArangoViewAsync { + + /** + * Asynchronous version of {@link SearchAlias#create()} + */ + CompletableFuture create(); + + /** + * Asynchronous version of {@link SearchAlias#create(SearchAliasCreateOptions)} + */ + CompletableFuture create(SearchAliasCreateOptions options); + + /** + * Asynchronous version of {@link SearchAlias#getProperties()} + */ + CompletableFuture getProperties(); + + /** + * Asynchronous version of {@link SearchAlias#updateProperties(SearchAliasPropertiesOptions)} + */ + CompletableFuture updateProperties(SearchAliasPropertiesOptions options); + + /** + * Asynchronous version of {@link SearchAlias#replaceProperties(SearchAliasPropertiesOptions)} + */ + CompletableFuture replaceProperties(SearchAliasPropertiesOptions options); + +} diff --git a/core/src/main/java/com/arangodb/arch/NoRawTypesInspection.java b/core/src/main/java/com/arangodb/arch/NoRawTypesInspection.java new file mode 100644 index 000000000..1031ca1c4 --- /dev/null +++ b/core/src/main/java/com/arangodb/arch/NoRawTypesInspection.java @@ -0,0 +1,16 @@ +package com.arangodb.arch; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Skip invoking {@code JavaType#getAllInvolvedRawTypes()} on the target class during arch tests. + * Prevents StackOverflowError caused by this. 
+ * FIXME: remove this when this is fixed and released + */ +@Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) +public @interface NoRawTypesInspection { +} diff --git a/core/src/main/java/com/arangodb/arch/UnstableApi.java b/core/src/main/java/com/arangodb/arch/UnstableApi.java new file mode 100644 index 000000000..5aac3338c --- /dev/null +++ b/core/src/main/java/com/arangodb/arch/UnstableApi.java @@ -0,0 +1,22 @@ +package com.arangodb.arch; + + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Indicates a public API that has references to internal classes and that should change in the next major release. + * Referenced internal classes are annotated with {@link UsedInApi}. + * Architectural tests consider these annotations to tolerate referencing annotated elements. + */ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.TYPE, + ElementType.METHOD, + ElementType.PARAMETER, + ElementType.FIELD +}) +public @interface UnstableApi { +} diff --git a/core/src/main/java/com/arangodb/arch/UsedInApi.java b/core/src/main/java/com/arangodb/arch/UsedInApi.java new file mode 100644 index 000000000..5529a39ea --- /dev/null +++ b/core/src/main/java/com/arangodb/arch/UsedInApi.java @@ -0,0 +1,18 @@ +package com.arangodb.arch; + + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Indicates an internal class referenced in public API, which should therefore be considered part of the public API. + * The annotated class and/or the referencing public API element should change in the next major release. + * Referencing element is annotated with {@link UnstableApi}. + * Architectural tests consider these annotations to tolerate referenced annotated elements. 
+ */ +@Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) +public @interface UsedInApi { +} diff --git a/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java b/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java new file mode 100644 index 000000000..ee961acf1 --- /dev/null +++ b/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java @@ -0,0 +1,177 @@ +package com.arangodb.config; + +import com.arangodb.Compression; +import com.arangodb.Protocol; +import com.arangodb.entity.LoadBalancingStrategy; +import com.arangodb.internal.config.ArangoConfigPropertiesImpl; + +import java.util.List; +import java.util.Optional; +import java.util.Properties; + +public interface ArangoConfigProperties { + + //region configuration properties keys + String KEY_HOSTS = "hosts"; + String KEY_PROTOCOL = "protocol"; + String KEY_USER = "user"; + String KEY_PASSWORD = "password"; + String KEY_JWT = "jwt"; + String KEY_TIMEOUT = "timeout"; + String KEY_USE_SSL = "useSsl"; + String KEY_SSL_CERT_VALUE = "sslCertValue"; + String KEY_SSL_ALGORITHM = "sslAlgorithm"; + String KEY_SSL_PROTOCOL = "sslProtocol"; + String KEY_VERIFY_HOST = "verifyHost"; + String KEY_CHUNK_SIZE = "chunkSize"; + String KEY_PIPELINING = "pipelining"; + String KEY_MAX_CONNECTIONS = "maxConnections"; + String KEY_CONNECTION_TTL = "connectionTtl"; + String KEY_KEEP_ALIVE_INTERVAL = "keepAliveInterval"; + String KEY_ACQUIRE_HOST_LIST = "acquireHostList"; + String KEY_ACQUIRE_HOST_LIST_INTERVAL = "acquireHostListInterval"; + String KEY_LOAD_BALANCING_STRATEGY = "loadBalancingStrategy"; + String KEY_RESPONSE_QUEUE_TIME_SAMPLES = "responseQueueTimeSamples"; + String KEY_COMPRESSION = "compression"; + String KEY_COMPRESSION_THRESHOLD = "compressionThreshold"; + String KEY_COMPRESSION_LEVEL = "compressionLevel"; + String KEY_SERDE_PROVIDER_CLASS = "serdeProviderClass"; + //endregion + + /** + * Reads properties from file arangodb.properties. 
+ * Properties must be prefixed with {@code "arangodb"}, e.g. {@code "arangodb.hosts=localhost:8529"}. + */ + static ArangoConfigProperties fromFile() { + return new ArangoConfigPropertiesImpl(); + } + + /** + * Reads properties from file {@code fileName}. + * Properties must be prefixed with {@code "arangodb"}, e.g. {@code "arangodb.hosts=localhost:8529"}. + */ + static ArangoConfigProperties fromFile(final String fileName) { + return new ArangoConfigPropertiesImpl(fileName); + } + + /** + * Reads properties from file {@code fileName}. + * Properties must be prefixed with {@code prefix}, e.g. {@code ".hosts=localhost:8529"}. + */ + static ArangoConfigProperties fromFile(final String fileName, final String prefix) { + return new ArangoConfigPropertiesImpl(fileName, prefix); + } + + /** + * Creates {@code ArangoConfigProperties} from Java properties ({@link java.util.Properties}). + * Properties must be prefixed with {@code "arangodb"}, e.g. {@code "arangodb.hosts=localhost:8529"}. + */ + static ArangoConfigProperties fromProperties(final Properties properties) { + return new ArangoConfigPropertiesImpl(properties); + } + + /** + * Creates {@code ArangoConfigProperties} from Java properties ({@link java.util.Properties}). + * Properties must be prefixed with {@code prefix}, e.g. {@code ".hosts=localhost:8529"}. 
+ */ + static ArangoConfigProperties fromProperties(final Properties properties, final String prefix) { + return new ArangoConfigPropertiesImpl(properties, prefix); + } + + default Optional> getHosts() { + return Optional.empty(); + } + + default Optional getProtocol() { + return Optional.empty(); + } + + default Optional getUser() { + return Optional.empty(); + } + + default Optional getPassword() { + return Optional.empty(); + } + + default Optional getJwt() { + return Optional.empty(); + } + + default Optional getTimeout() { + return Optional.empty(); + } + + default Optional getUseSsl() { + return Optional.empty(); + } + + default Optional getSslCertValue() { + return Optional.empty(); + } + + default Optional getSslAlgorithm() { + return Optional.empty(); + } + + default Optional getSslProtocol() { + return Optional.empty(); + } + + default Optional getVerifyHost() { + return Optional.empty(); + } + + default Optional getChunkSize() { + return Optional.empty(); + } + + default Optional getPipelining() { + return Optional.empty(); + } + + default Optional getMaxConnections() { + return Optional.empty(); + } + + default Optional getConnectionTtl() { + return Optional.empty(); + } + + default Optional getKeepAliveInterval() { + return Optional.empty(); + } + + default Optional getAcquireHostList() { + return Optional.empty(); + } + + default Optional getAcquireHostListInterval() { + return Optional.empty(); + } + + default Optional getLoadBalancingStrategy() { + return Optional.empty(); + } + + default Optional getResponseQueueTimeSamples() { + return Optional.empty(); + } + + default Optional getCompression() { + return Optional.empty(); + } + + default Optional getCompressionThreshold() { + return Optional.empty(); + } + + default Optional getCompressionLevel() { + return Optional.empty(); + } + + default Optional getSerdeProviderClass() { + return Optional.empty(); + } + +} diff --git a/core/src/main/java/com/arangodb/config/HostDescription.java 
b/core/src/main/java/com/arangodb/config/HostDescription.java new file mode 100644 index 000000000..2e26177ed --- /dev/null +++ b/core/src/main/java/com/arangodb/config/HostDescription.java @@ -0,0 +1,81 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.config; + +import java.util.Objects; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class HostDescription { + + private final String host; + private final int port; + + /** + * Factory method used by MicroProfile Config as + * automatic converter. + * + * @param value hostname:port + * @return Host + */ + public static HostDescription parse(CharSequence value) { + Objects.requireNonNull(value); + final String[] split = value.toString().split(":"); + if (split.length != 2) { + throw new IllegalArgumentException("Could not parse host. 
Expected hostname:port, but got: " + value); + } + return new HostDescription(split[0], Integer.parseInt(split[1])); + } + + public HostDescription(final String host, final int port) { + super(); + this.host = host; + this.port = port; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + @Override + public String toString() { + return String.format("host[addr=%s,port=%s]", host, port); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + HostDescription that = (HostDescription) o; + return port == that.port && Objects.equals(host, that.host); + } + + @Override + public int hashCode() { + return Objects.hash(host, port); + } +} diff --git a/core/src/main/java/com/arangodb/config/ProtocolConfig.java b/core/src/main/java/com/arangodb/config/ProtocolConfig.java new file mode 100644 index 000000000..54432800d --- /dev/null +++ b/core/src/main/java/com/arangodb/config/ProtocolConfig.java @@ -0,0 +1,7 @@ +package com.arangodb.config; + +/** + * Configuration specific for {@link com.arangodb.internal.net.ProtocolProvider}. + */ +public interface ProtocolConfig { +} diff --git a/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java b/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java new file mode 100644 index 000000000..5019834c8 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java @@ -0,0 +1,158 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.internal.DocumentFields; +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.io.Serializable; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +abstract class AbstractBaseDocument implements Serializable { + + private static final long serialVersionUID = 6985324876843525239L; + + private static final String[] META_PROPS = new String[]{ + DocumentFields.ID, + DocumentFields.KEY, + DocumentFields.REV + }; + private final HashMap properties; + + AbstractBaseDocument() { + properties = new HashMap<>(); + } + + AbstractBaseDocument(final String key) { + this(); + setKey(key); + } + + AbstractBaseDocument(final Map properties) { + this(); + setProperties(properties); + } + + @JsonIgnore + public String getId() { + return (String) getAttribute(DocumentFields.ID); + } + + public void setId(final String id) { + addAttribute(DocumentFields.ID, id); + } + + @JsonIgnore + public String getKey() { + return (String) getAttribute(DocumentFields.KEY); + } + + public void setKey(final String key) { + addAttribute(DocumentFields.KEY, key); + } + + @JsonIgnore + public String getRevision() { + return (String) getAttribute(DocumentFields.REV); + } 
+ + public void setRevision(final String rev) { + addAttribute(DocumentFields.REV, rev); + } + + @JsonInclude + @JsonAnyGetter + public Map getProperties() { + return Collections.unmodifiableMap(properties); + } + + @JsonIgnore + public void setProperties(final Map props) { + for (String f : getMetaProps()) { + requireString(f, props.get(f)); + } + this.properties.putAll(props); + } + + public Object getAttribute(final String key) { + return properties.get(key); + } + + @JsonInclude + @JsonAnySetter + public void addAttribute(final String key, final Object value) { + for (String f : getMetaProps()) { + if (f.equals(key)) { + requireString(key, value); + } + } + properties.put(key, value); + } + + public void updateAttribute(final String key, final Object value) { + if (properties.containsKey(key)) { + addAttribute(key, value); + } + } + + public void removeAttribute(final String key) { + properties.remove(key); + } + + protected String[] getMetaProps() { + return META_PROPS; + } + + private void requireString(final String k, final Object v) { + if (v != null && !(v instanceof String)) { + throw new IllegalArgumentException(k + " must be a String"); + } + } + + String stringify() { + return "{" + + "properties=" + properties + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AbstractBaseDocument that = (AbstractBaseDocument) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java new file mode 100644 index 000000000..eb56fc74b --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java @@ -0,0 +1,395 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.ArangoDatabase; +import com.arangodb.model.ExplainAqlQueryOptions; + +import java.util.Collection; +import java.util.Map; +import java.util.Objects; + +/** + * @author Mark Vollmary + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ +@Deprecated +public final class AqlExecutionExplainEntity { + + private ExecutionPlan plan; + private Collection plans; + private Collection warnings; + private ExecutionStats stats; + private Boolean cacheable; + + public ExecutionPlan getPlan() { + return plan; + } + + public Collection getPlans() { + return plans; + } + + public Collection getWarnings() { + return warnings; + } + + public ExecutionStats getStats() { + return stats; + } + + public Boolean getCacheable() { + return cacheable; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlExecutionExplainEntity)) return false; + AqlExecutionExplainEntity that = (AqlExecutionExplainEntity) o; + return Objects.equals(plan, that.plan) && Objects.equals(plans, that.plans) && Objects.equals(warnings, that.warnings) && Objects.equals(stats, that.stats) && Objects.equals(cacheable, that.cacheable); + } + + @Override + public int hashCode() { + return Objects.hash(plan, plans, warnings, stats, cacheable); + } + + public static final 
class ExecutionPlan { + private Collection nodes; + private Collection rules; + private Collection collections; + private Collection variables; + private Integer estimatedCost; + private Integer estimatedNrItems; + + public Collection getNodes() { + return nodes; + } + + public Collection getRules() { + return rules; + } + + public Collection getCollections() { + return collections; + } + + public Collection getVariables() { + return variables; + } + + public Integer getEstimatedCost() { + return estimatedCost; + } + + public Integer getEstimatedNrItems() { + return estimatedNrItems; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionPlan)) return false; + ExecutionPlan that = (ExecutionPlan) o; + return Objects.equals(nodes, that.nodes) && Objects.equals(rules, that.rules) && Objects.equals(collections, that.collections) && Objects.equals(variables, that.variables) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(estimatedNrItems, that.estimatedNrItems); + } + + @Override + public int hashCode() { + return Objects.hash(nodes, rules, collections, variables, estimatedCost, estimatedNrItems); + } + } + + public static final class ExecutionNode { + private String type; + private Collection dependencies; + private Long id; + private Integer estimatedCost; + private Integer estimatedNrItems; + private Long depth; + private String database; + private String collection; + private ExecutionVariable inVariable; + private ExecutionVariable outVariable; + private ExecutionVariable conditionVariable; + private Boolean random; + private Long offset; + private Long limit; + private Boolean fullCount; + private ExecutionNode subquery; + private Boolean isConst; + private Boolean canThrow; + private String expressionType; + private Collection indexes; + private ExecutionExpression expression; + private ExecutionCollection condition; + private Boolean reverse; + + public String getType() { + return type; + } + + public 
Collection getDependencies() { + return dependencies; + } + + public Long getId() { + return id; + } + + public Integer getEstimatedCost() { + return estimatedCost; + } + + public Integer getEstimatedNrItems() { + return estimatedNrItems; + } + + public Long getDepth() { + return depth; + } + + public String getDatabase() { + return database; + } + + public String getCollection() { + return collection; + } + + public ExecutionVariable getInVariable() { + return inVariable; + } + + public ExecutionVariable getOutVariable() { + return outVariable; + } + + public ExecutionVariable getConditionVariable() { + return conditionVariable; + } + + public Boolean getRandom() { + return random; + } + + public Long getOffset() { + return offset; + } + + public Long getLimit() { + return limit; + } + + public Boolean getFullCount() { + return fullCount; + } + + public ExecutionNode getSubquery() { + return subquery; + } + + public Boolean getIsConst() { + return isConst; + } + + public Boolean getCanThrow() { + return canThrow; + } + + public String getExpressionType() { + return expressionType; + } + + public Collection getIndexes() { + return indexes; + } + + public ExecutionExpression getExpression() { + return expression; + } + + public ExecutionCollection getCondition() { + return condition; + } + + public Boolean getReverse() { + return reverse; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionNode)) return false; + ExecutionNode that = (ExecutionNode) o; + return Objects.equals(type, that.type) && Objects.equals(dependencies, that.dependencies) && Objects.equals(id, that.id) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(estimatedNrItems, that.estimatedNrItems) && Objects.equals(depth, that.depth) && Objects.equals(database, that.database) && Objects.equals(collection, that.collection) && Objects.equals(inVariable, that.inVariable) && Objects.equals(outVariable, that.outVariable) && 
Objects.equals(conditionVariable, that.conditionVariable) && Objects.equals(random, that.random) && Objects.equals(offset, that.offset) && Objects.equals(limit, that.limit) && Objects.equals(fullCount, that.fullCount) && Objects.equals(subquery, that.subquery) && Objects.equals(isConst, that.isConst) && Objects.equals(canThrow, that.canThrow) && Objects.equals(expressionType, that.expressionType) && Objects.equals(indexes, that.indexes) && Objects.equals(expression, that.expression) && Objects.equals(condition, that.condition) && Objects.equals(reverse, that.reverse); + } + + @Override + public int hashCode() { + return Objects.hash(type, dependencies, id, estimatedCost, estimatedNrItems, depth, database, collection, inVariable, outVariable, conditionVariable, random, offset, limit, fullCount, subquery, isConst, canThrow, expressionType, indexes, expression, condition, reverse); + } + } + + public static final class ExecutionVariable { + private Long id; + private String name; + + public Long getId() { + return id; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionVariable)) return false; + ExecutionVariable that = (ExecutionVariable) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(id, name); + } + } + + public static final class ExecutionExpression { + private String type; + private String name; + private Long id; + private Object value; + private Boolean sorted; + private String quantifier; + private Collection levels; + private Collection subNodes; + + public String getType() { + return type; + } + + public String getName() { + return name; + } + + public Long getId() { + return id; + } + + public Object getValue() { + return value; + } + + public Boolean getSorted() { + return sorted; + } + + public String getQuantifier() { + return quantifier; + } + + public Collection getLevels() { + 
return levels; + } + + public Collection getSubNodes() { + return subNodes; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionExpression)) return false; + ExecutionExpression that = (ExecutionExpression) o; + return Objects.equals(type, that.type) && Objects.equals(name, that.name) && Objects.equals(id, that.id) && Objects.equals(value, that.value) && Objects.equals(sorted, that.sorted) && Objects.equals(quantifier, that.quantifier) && Objects.equals(levels, that.levels) && Objects.equals(subNodes, that.subNodes); + } + + @Override + public int hashCode() { + return Objects.hash(type, name, id, value, sorted, quantifier, levels, subNodes); + } + } + + public static final class ExecutionCollection { + private String name; + private String type; + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionCollection)) return false; + ExecutionCollection that = (ExecutionCollection) o; + return Objects.equals(name, that.name) && Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + } + + public static final class ExecutionStats { + private Integer rulesExecuted; + private Integer rulesSkipped; + private Integer plansCreated; + private Long peakMemoryUsage; + private Double executionTime; + + public Integer getRulesExecuted() { + return rulesExecuted; + } + + public Integer getRulesSkipped() { + return rulesSkipped; + } + + public Integer getPlansCreated() { + return plansCreated; + } + + public Long getPeakMemoryUsage() { + return peakMemoryUsage; + } + + public Double getExecutionTime() { + return executionTime; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionStats)) return false; + ExecutionStats that = (ExecutionStats) o; + return Objects.equals(rulesExecuted, that.rulesExecuted) && Objects.equals(rulesSkipped, 
that.rulesSkipped) && Objects.equals(plansCreated, that.plansCreated) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && Objects.equals(executionTime, that.executionTime); + } + + @Override + public int hashCode() { + return Objects.hash(rulesExecuted, rulesSkipped, plansCreated, peakMemoryUsage, executionTime); + } + } + +} diff --git a/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java b/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java new file mode 100644 index 000000000..78ff58921 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java @@ -0,0 +1,72 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class AqlFunctionEntity { + + private String name; + private String code; + private Boolean isDeterministic; + + public AqlFunctionEntity() { + super(); + } + + /** + * @return The fully qualified name of the user function + */ + public String getName() { + return name; + } + + /** + * @return A string representation of the function body + */ + public String getCode() { + return code; + } + + /** + * @return An optional boolean value to indicate whether the function results are fully deterministic (function + * return value solely depends on the input value and return value is the same for repeated calls with same + * input). The isDeterministic attribute is currently not used but may be used later for optimizations. + */ + public Boolean getIsDeterministic() { + return isDeterministic; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlFunctionEntity)) return false; + AqlFunctionEntity that = (AqlFunctionEntity) o; + return Objects.equals(name, that.name) && Objects.equals(code, that.code) && Objects.equals(isDeterministic, that.isDeterministic); + } + + @Override + public int hashCode() { + return Objects.hash(name, code, isDeterministic); + } +} diff --git a/core/src/main/java/com/arangodb/entity/AqlParseEntity.java b/core/src/main/java/com/arangodb/entity/AqlParseEntity.java new file mode 100644 index 000000000..3dd7bf9ac --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AqlParseEntity.java @@ -0,0 +1,99 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Collection; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class AqlParseEntity { + + private Collection collections; + private Collection bindVars; + private Collection ast; + + public Collection getCollections() { + return collections; + } + + public Collection getBindVars() { + return bindVars; + } + + public Collection getAst() { + return ast; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlParseEntity)) return false; + AqlParseEntity that = (AqlParseEntity) o; + return Objects.equals(collections, that.collections) && Objects.equals(bindVars, that.bindVars) && Objects.equals(ast, that.ast); + } + + @Override + public int hashCode() { + return Objects.hash(collections, bindVars, ast); + } + + public static final class AstNode { + private String type; + private Collection subNodes; + private String name; + private Long id; + private Object value; + + public String getType() { + return type; + } + + public Collection getSubNodes() { + return subNodes; + } + + public String getName() { + return name; + } + + public Long getId() { + return id; + } + + public Object getValue() { + return value; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AstNode)) return false; + AstNode astNode = (AstNode) o; + return Objects.equals(type, astNode.type) && Objects.equals(subNodes, astNode.subNodes) && Objects.equals(name, astNode.name) && Objects.equals(id, 
astNode.id) && Objects.equals(value, astNode.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, subNodes, name, id, value); + } + } + +} diff --git a/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java b/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java new file mode 100644 index 000000000..c4eb9ea22 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java @@ -0,0 +1,220 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonAnySetter; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public final class AqlQueryExplainEntity { + + private ExecutionPlan plan; + private Collection plans; + private Collection warnings; + private ExecutionStats stats; + private Boolean cacheable; + + public ExecutionPlan getPlan() { + return plan; + } + + public Collection getPlans() { + return plans; + } + + public Collection getWarnings() { + return warnings; + } + + public ExecutionStats getStats() { + return stats; + } + + public Boolean getCacheable() { + return cacheable; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlQueryExplainEntity)) return false; + AqlQueryExplainEntity that = (AqlQueryExplainEntity) o; + return Objects.equals(plan, that.plan) && Objects.equals(plans, that.plans) && Objects.equals(warnings, that.warnings) && Objects.equals(stats, that.stats) && Objects.equals(cacheable, that.cacheable); + } + + @Override + public int hashCode() { + return Objects.hash(plan, plans, warnings, stats, cacheable); + } + + public static final class ExecutionPlan { + private final Map properties = new HashMap<>(); + private Collection nodes; + private Double estimatedCost; + private Collection collections; + private Collection rules; + private Collection variables; + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + public Collection getNodes() { + return nodes; + } + + public Double getEstimatedCost() { + return estimatedCost; + } + + public Collection getCollections() { + return collections; + } + + public Collection getRules() { + return rules; + } + + public Collection getVariables() { + return variables; + } + + @Override + public boolean 
equals(Object o) { + if (!(o instanceof ExecutionPlan)) return false; + ExecutionPlan that = (ExecutionPlan) o; + return Objects.equals(properties, that.properties) && Objects.equals(nodes, that.nodes) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(collections, that.collections) && Objects.equals(rules, that.rules) && Objects.equals(variables, that.variables); + } + + @Override + public int hashCode() { + return Objects.hash(properties, nodes, estimatedCost, collections, rules, variables); + } + } + + public static final class ExecutionNode { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionNode)) return false; + ExecutionNode that = (ExecutionNode) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionVariable { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionVariable)) return false; + ExecutionVariable that = (ExecutionVariable) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionCollection { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof 
ExecutionCollection)) return false; + ExecutionCollection that = (ExecutionCollection) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionStats { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionStats)) return false; + ExecutionStats that = (ExecutionStats) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + +} diff --git a/src/main/java/com/arangodb/entity/DocumentField.java b/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java similarity index 52% rename from src/main/java/com/arangodb/entity/DocumentField.java rename to core/src/main/java/com/arangodb/entity/ArangoDBEngine.java index cf4b22f97..30f811800 100644 --- a/src/main/java/com/arangodb/entity/DocumentField.java +++ b/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java @@ -1,52 +1,59 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * @author Mark Vollmary - * - */ -@Retention(RetentionPolicy.RUNTIME) -@Target({ ElementType.FIELD }) -public @interface DocumentField { - - public static enum Type { - ID("_id"), KEY("_key"), REV("_rev"), FROM("_from"), TO("_to"); - - private final String serializeName; - - private Type(final String serializeName) { - this.serializeName = serializeName; - } - - public String getSerializeName() { - return serializeName; - } - } - - Type value(); - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class ArangoDBEngine { + + private StorageEngineName name; + + public ArangoDBEngine() { + super(); + } + + /** + * @return the storage engine name + */ + public StorageEngineName getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoDBEngine)) return false; + ArangoDBEngine that = (ArangoDBEngine) o; + return name == that.name; + } + + @Override + public int hashCode() { + return Objects.hashCode(name); + } + + public enum StorageEngineName { + mmfiles, rocksdb + } + +} diff --git a/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java b/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java new file mode 100644 index 000000000..6fd696166 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java @@ -0,0 +1,71 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class ArangoDBVersion { + + private String server; + private String version; + private License license; + + public ArangoDBVersion() { + super(); + } + + /** + * @return will always contain arango + */ + public String getServer() { + return server; + } + + /** + * @return the server version string. The string has the format "major.minor.sub". major and minor will be numeric, + * and sub may contain a number or a textual version. + */ + public String getVersion() { + return version; + } + + /** + * @return the license + */ + public License getLicense() { + return license; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoDBVersion)) return false; + ArangoDBVersion that = (ArangoDBVersion) o; + return Objects.equals(server, that.server) && Objects.equals(version, that.version) && license == that.license; + } + + @Override + public int hashCode() { + return Objects.hash(server, version, license); + } +} \ No newline at end of file diff --git a/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java b/core/src/main/java/com/arangodb/entity/BaseDocument.java similarity index 64% rename from src/main/java/com/arangodb/entity/EdgeUpdateEntity.java rename to core/src/main/java/com/arangodb/entity/BaseDocument.java index 15f65697f..0e8860e74 100644 --- a/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java +++ b/core/src/main/java/com/arangodb/entity/BaseDocument.java @@ -1,43 +1,48 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.velocypack.annotations.SerializedName; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeUpdateEntity extends DocumentEntity { - - @SerializedName("_oldRev") - private String oldRev; - - public EdgeUpdateEntity() { - super(); - } - - public String getOldRev() { - return oldRev; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Map; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class BaseDocument extends AbstractBaseDocument { + + public BaseDocument() { + super(); + } + + public BaseDocument(final String key) { + super(key); + } + + public BaseDocument(final Map properties) { + super(properties); + } + + @Override + public String toString() { + return "BaseDocument" + stringify(); + } + +} diff --git a/core/src/main/java/com/arangodb/entity/BaseEdgeDocument.java b/core/src/main/java/com/arangodb/entity/BaseEdgeDocument.java new file mode 100644 index 000000000..46abcc246 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/BaseEdgeDocument.java @@ -0,0 +1,90 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.internal.DocumentFields; +import com.fasterxml.jackson.annotation.JsonIgnore; + +import java.util.Map; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class BaseEdgeDocument extends BaseDocument { + + private static final String[] META_PROPS = new String[]{ + DocumentFields.ID, + DocumentFields.KEY, + DocumentFields.REV, + DocumentFields.FROM, + DocumentFields.TO + }; + + public BaseEdgeDocument() { + super(); + } + + public BaseEdgeDocument(final String from, final String to) { + super(); + setFrom(from); + setTo(to); + } + + public BaseEdgeDocument(final String key, final String from, final String to) { + super(key); + setFrom(from); + setTo(to); + } + + public BaseEdgeDocument(final Map properties) { + super(properties); + } + + @JsonIgnore + public String getFrom() { + return (String) getAttribute(DocumentFields.FROM); + } + + public void setFrom(final String from) { + addAttribute(DocumentFields.FROM, from); + } + + @JsonIgnore + public String getTo() { + return (String) getAttribute(DocumentFields.TO); + } + + public void setTo(final String to) { + addAttribute(DocumentFields.TO, to); + } + + @Override + protected String[] getMetaProps() { + return META_PROPS; + } + + @Override + public String toString() { + return "BaseEdgeDocument" + stringify(); + } + +} diff --git a/core/src/main/java/com/arangodb/entity/CollectionEntity.java b/core/src/main/java/com/arangodb/entity/CollectionEntity.java new file mode 100644 index 000000000..45ddeaf12 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/CollectionEntity.java @@ -0,0 +1,99 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.model.CollectionSchema; +import com.arangodb.model.ComputedValue; + +import java.util.List; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public class CollectionEntity { + + private String id; + private String name; + private Boolean waitForSync; + private Boolean isSystem; + private CollectionStatus status; + private CollectionType type; + private CollectionSchema schema; + private List computedValues; + + public CollectionEntity() { + super(); + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + public Boolean getIsSystem() { + return isSystem; + } + + @Deprecated + public CollectionStatus getStatus() { + return status; + } + + public CollectionType getType() { + return type; + } + + /** + * @return Optional object that specifies the collection level schema for documents. + * @since ArangoDB 3.7 + */ + public CollectionSchema getSchema() { + return schema; + } + + /** + * @return A list of computed values. 
+ * @since ArangoDB 3.10 + */ + public List getComputedValues() { + return computedValues; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionEntity)) return false; + CollectionEntity that = (CollectionEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(waitForSync, that.waitForSync) && Objects.equals(isSystem, that.isSystem) && status == that.status && type == that.type && Objects.equals(schema, that.schema) && Objects.equals(computedValues, that.computedValues); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, waitForSync, isSystem, status, type, schema, computedValues); + } +} diff --git a/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java new file mode 100644 index 000000000..8f5076639 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java @@ -0,0 +1,165 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Collection; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class CollectionPropertiesEntity extends CollectionEntity { + + private Boolean cacheEnabled; + private String distributeShardsLike; + private Boolean isDisjoint; + private Boolean isSmart; + private KeyOptions keyOptions; + private Integer numberOfShards; + private ReplicationFactor replicationFactor; + private Collection shardKeys; + private String shardingStrategy; // cluster option + private String smartGraphAttribute; + private String smartJoinAttribute; // enterprise option + private Integer writeConcern; + private Long count; + + public CollectionPropertiesEntity() { + super(); + } + + public Boolean getCacheEnabled() { + return cacheEnabled; + } + + public void setCacheEnabled(Boolean cacheEnabled) { + this.cacheEnabled = cacheEnabled; + } + + public String getDistributeShardsLike() { + return distributeShardsLike; + } + + public void setDistributeShardsLike(String distributeShardsLike) { + this.distributeShardsLike = distributeShardsLike; + } + + public Boolean getDisjoint() { + return isDisjoint; + } + + public void setDisjoint(Boolean disjoint) { + isDisjoint = disjoint; + } + + public Boolean getSmart() { + return isSmart; + } + + public void setSmart(Boolean smart) { + isSmart = smart; + } + + public KeyOptions getKeyOptions() { + return keyOptions; + } + + public void setKeyOptions(KeyOptions keyOptions) { + this.keyOptions = keyOptions; + } + + public Integer getNumberOfShards() { + return numberOfShards; + } + + public void setNumberOfShards(Integer numberOfShards) { + this.numberOfShards = numberOfShards; + } + + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + public void setReplicationFactor(ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + } + + public Collection 
getShardKeys() { + return shardKeys; + } + + public void setShardKeys(Collection shardKeys) { + this.shardKeys = shardKeys; + } + + public String getShardingStrategy() { + return shardingStrategy; + } + + public void setShardingStrategy(String shardingStrategy) { + this.shardingStrategy = shardingStrategy; + } + + public String getSmartGraphAttribute() { + return smartGraphAttribute; + } + + public void setSmartGraphAttribute(String smartGraphAttribute) { + this.smartGraphAttribute = smartGraphAttribute; + } + + public String getSmartJoinAttribute() { + return smartJoinAttribute; + } + + public void setSmartJoinAttribute(String smartJoinAttribute) { + this.smartJoinAttribute = smartJoinAttribute; + } + + public Integer getWriteConcern() { + return writeConcern; + } + + public void setWriteConcern(Integer writeConcern) { + this.writeConcern = writeConcern; + } + + public Long getCount() { + return count; + } + + public void setCount(Long count) { + this.count = count; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionPropertiesEntity)) return false; + if (!super.equals(o)) return false; + CollectionPropertiesEntity that = (CollectionPropertiesEntity) o; + return Objects.equals(cacheEnabled, that.cacheEnabled) && Objects.equals(distributeShardsLike, that.distributeShardsLike) && Objects.equals(isDisjoint, that.isDisjoint) && Objects.equals(isSmart, that.isSmart) && Objects.equals(keyOptions, that.keyOptions) && Objects.equals(numberOfShards, that.numberOfShards) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(shardKeys, that.shardKeys) && Objects.equals(shardingStrategy, that.shardingStrategy) && Objects.equals(smartGraphAttribute, that.smartGraphAttribute) && Objects.equals(smartJoinAttribute, that.smartJoinAttribute) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(count, that.count); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cacheEnabled, 
distributeShardsLike, isDisjoint, isSmart, keyOptions, numberOfShards, replicationFactor, shardKeys, shardingStrategy, smartGraphAttribute, smartJoinAttribute, writeConcern, count); + } +} diff --git a/src/main/java/com/arangodb/entity/AqlFunctionEntity.java b/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java similarity index 56% rename from src/main/java/com/arangodb/entity/AqlFunctionEntity.java rename to core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java index c5609d68a..02e8e2ae3 100644 --- a/src/main/java/com/arangodb/entity/AqlFunctionEntity.java +++ b/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java @@ -1,53 +1,48 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class AqlFunctionEntity { - - private String name; - private String code; - - public AqlFunctionEntity() { - super(); - } - - /** - * @return The fully qualified name of the user function - */ - public String getName() { - return name; - } - - /** - * @return A string representation of the function body - */ - public String getCode() { - return code; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class CollectionRevisionEntity extends CollectionEntity { + + private String revision; + + public String getRevision() { + return revision; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionRevisionEntity)) return false; + if (!super.equals(o)) return false; + CollectionRevisionEntity that = (CollectionRevisionEntity) o; + return Objects.equals(revision, that.revision); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), revision); + } +} diff --git a/src/main/java/com/arangodb/entity/CollectionStatus.java b/core/src/main/java/com/arangodb/entity/CollectionStatus.java similarity index 60% rename from src/main/java/com/arangodb/entity/CollectionStatus.java rename to core/src/main/java/com/arangodb/entity/CollectionStatus.java index 0e9f78be4..39b7863b9 100644 --- a/src/main/java/com/arangodb/entity/CollectionStatus.java +++ b/core/src/main/java/com/arangodb/entity/CollectionStatus.java @@ -1,50 +1,50 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum CollectionStatus { - - NEW_BORN_COLLECTION(1), UNLOADED(2), LOADED(3), IN_THE_PROCESS_OF_BEING_UNLOADED(4), DELETED(5); - - private final int status; - - private CollectionStatus(final int status) { - this.status = status; - } - - public int getStatus() { - return status; - } - - public static CollectionStatus fromStatus(final int status) { - for (final CollectionStatus cStatus : CollectionStatus.values()) { - if (cStatus.status == status) { - return cStatus; - } - } - return null; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +@Deprecated +public enum CollectionStatus { + + LOADED(3), DELETED(5); + + private final int status; + + CollectionStatus(final int status) { + this.status = status; + } + + public static CollectionStatus fromStatus(final int status) { + for (final CollectionStatus cStatus : CollectionStatus.values()) { + if (cStatus.status == status) { + return cStatus; + } + } + return null; + } + + public int getStatus() { + return status; + } + +} diff --git a/src/main/java/com/arangodb/entity/CollectionType.java b/core/src/main/java/com/arangodb/entity/CollectionType.java similarity index 64% rename from src/main/java/com/arangodb/entity/CollectionType.java rename to core/src/main/java/com/arangodb/entity/CollectionType.java index 963358bab..7d771b67d 100644 --- a/src/main/java/com/arangodb/entity/CollectionType.java +++ b/core/src/main/java/com/arangodb/entity/CollectionType.java @@ -1,49 +1,48 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum CollectionType { - - DOCUMENT(2), EDGES(3); - - private final int type; - - private CollectionType(final int type) { - this.type = type; - } - - public int getType() { - return type; - } - - public static CollectionType fromType(final int type) { - for (final CollectionType cType : CollectionType.values()) { - if (cType.type == type) { - return cType; - } - } - return null; - } -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum CollectionType { + + DOCUMENT(2), EDGES(3); + + private final int type; + + CollectionType(final int type) { + this.type = type; + } + + public static CollectionType fromType(final int type) { + for (final CollectionType cType : CollectionType.values()) { + if (cType.type == type) { + return cType; + } + } + return null; + } + + public int getType() { + return type; + } +} diff --git a/core/src/main/java/com/arangodb/entity/CursorEntity.java b/core/src/main/java/com/arangodb/entity/CursorEntity.java new file mode 100644 index 000000000..6070ddc1a --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/CursorEntity.java @@ -0,0 +1,152 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import com.arangodb.internal.serde.UserDataInside;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class CursorEntity<T> {
+    private String id;
+    private Integer count;
+    private Boolean cached;
+    private Boolean hasMore;
+    @UserDataInside
+    private List<T> result;
+    private Boolean potentialDirtyRead;
+    private String nextBatchId;
+    private final Extras extra = new Extras();
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    /**
+     * @return the total number of result documents available (only available if the query was executed with the count
+     * attribute set)
+     */
+    public Integer getCount() {
+        return count;
+    }
+
+    /**
+     * @return an optional object with extra information about the query result contained in its stats sub-attribute.
+     * For data-modification queries, the extra.stats sub-attribute will contain the number of modified
+     * documents and the number of documents that could not be modified due to an error (if ignoreErrors query
+     * option is specified)
+     */
+    public Extras getExtra() {
+        return extra;
+    }
+
+    /**
+     * @return a boolean flag indicating whether the query result was served from the query cache or not. If the query
+     * result is served from the query cache, the extra return attribute will not contain any stats
+     * sub-attribute and no profile sub-attribute.
+     */
+    public Boolean getCached() {
+        return cached;
+    }
+
+    /**
+     * @return A boolean indicator whether there are more results available for the cursor on the server
+     */
+    public Boolean getHasMore() {
+        return hasMore;
+    }
+
+    /**
+     * @return a list of result documents (might be empty if query has no results)
+     */
+    public List<T> getResult() {
+        return result;
+    }
+
+    /**
+     * @return true if the result is a potential dirty read
+     * @since ArangoDB 3.10
+     */
+    public Boolean isPotentialDirtyRead() {
+        return potentialDirtyRead;
+    }
+
+    public void setPotentialDirtyRead(final Boolean potentialDirtyRead) {
+        this.potentialDirtyRead = potentialDirtyRead;
+    }
+
+    /**
+     * @return The ID of the batch after the current one. The first batch has an ID of 1 and the value is incremented by
+     * 1 with every batch. Only set if the allowRetry query option is enabled.
+     * @since ArangoDB 3.11
+     */
+    public String getNextBatchId() {
+        return nextBatchId;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof CursorEntity)) return false;
+        CursorEntity<?> that = (CursorEntity<?>) o;
+        return Objects.equals(id, that.id) && Objects.equals(count, that.count) && Objects.equals(cached, that.cached) && Objects.equals(hasMore, that.hasMore) && Objects.equals(result, that.result) && Objects.equals(potentialDirtyRead, that.potentialDirtyRead) && Objects.equals(nextBatchId, that.nextBatchId) && Objects.equals(extra, that.extra);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, count, cached, hasMore, result, potentialDirtyRead, nextBatchId, extra);
+    }
+
+    public static final class Extras {
+        private final Collection<CursorWarning> warnings = Collections.emptyList();
+        private CursorStats stats;
+
+        public CursorStats getStats() {
+            return stats;
+        }
+
+        public Collection<CursorWarning> getWarnings() {
+            return warnings;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (!(o instanceof Extras)) return false;
+            Extras extras = (Extras) o;
+            return Objects.equals(warnings, extras.warnings) && Objects.equals(stats, extras.stats);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(warnings, stats);
+        }
+    }
+
+}
+
diff --git a/core/src/main/java/com/arangodb/entity/CursorStats.java b/core/src/main/java/com/arangodb/entity/CursorStats.java
new file mode 100644
index 000000000..2d5ce96a3
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/CursorStats.java
@@ -0,0 +1,187 @@
+package com.arangodb.entity;
+
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+public final class CursorStats {
+    private final Map<String, Object> properties = new HashMap<>();
+    private Long writesExecuted;
+    private Long writesIgnored;
+    private Long scannedFull;
+    private Long scannedIndex;
+    private Long cursorsCreated;
+    private Long cursorsRearmed;
+    private Long cacheHits;
+    private Long cacheMisses;
+    private Long filtered;
+    private Long httpRequests;
+    private Long fullCount;
+    private Double executionTime;
+    private Long peakMemoryUsage;
+    private Integer documentLookups;
+    private Integer intermediateCommits;
+    private Integer seeks;
+
+    @JsonAnySetter
+    public void add(String key, Object value) {
+        properties.put(key, value);
+    }
+
+    public Object get(String key) {
+        return properties.get(key);
+    }
+
+    /**
+     * @return The total number of data-modification operations successfully executed.
+     */
+    public Long getWritesExecuted() {
+        return writesExecuted;
+    }
+
+    /**
+     * @return The total number of data-modification operations that were unsuccessful, but have been ignored because of
+     * the ignoreErrors query option.
+     */
+    public Long getWritesIgnored() {
+        return writesIgnored;
+    }
+
+    /**
+     * @return The total number of documents iterated over when scanning a collection without an index. Documents
+     * scanned by subqueries are included in the result, but operations triggered by built-in or user-defined AQL
+     * functions are not.
+     */
+    public Long getScannedFull() {
+        return scannedFull;
+    }
+
+    /**
+     * @return The total number of documents iterated over when scanning a collection using an index. Documents scanned
+     * by subqueries are included in the result, but operations triggered by built-in or user-defined AQL functions are
+     * not.
+     */
+    public Long getScannedIndex() {
+        return scannedIndex;
+    }
+
+    /**
+     * @return The total number of cursor objects created during query execution. Cursor objects are created for index
+     * lookups.
+     */
+    public Long getCursorsCreated() {
+        return cursorsCreated;
+    }
+
+    /**
+     * @return The total number of times an existing cursor object was repurposed. Repurposing an existing cursor object
+     * is normally more efficient compared to destroying an existing cursor object and creating a new one from scratch.
+     */
+    public Long getCursorsRearmed() {
+        return cursorsRearmed;
+    }
+
+    /**
+     * @return The total number of index entries read from in-memory caches for indexes of type edge or persistent. This
+     * value is only non-zero when reading from indexes that have an in-memory cache enabled, and when the query allows
+     * using the in-memory cache (i.e. using equality lookups on all index attributes).
+     */
+    public Long getCacheHits() {
+        return cacheHits;
+    }
+
+    /**
+     * @return The total number of cache read attempts for index entries that could not be served from in-memory caches
+     * for indexes of type edge or persistent. This value is only non-zero when reading from indexes that have an
+     * in-memory cache enabled, the query allows using the in-memory cache (i.e. using equality lookups on all index
+     * attributes) and the looked up values are not present in the cache.
+     */
+    public Long getCacheMisses() {
+        return cacheMisses;
+    }
+
+    /**
+     * @return The total number of documents removed after executing a filter condition in a FilterNode or another node
+     * that post-filters data. Note that nodes of the IndexNode type can also filter documents by selecting only the
+     * required index range from a collection, and the filtered value only indicates how much filtering was done by a
+     * post filter in the IndexNode itself or following FilterNode nodes. Nodes of the EnumerateCollectionNode and
+     * TraversalNode types can also apply filter conditions and can report the number of filtered documents.
+     */
+    public Long getFiltered() {
+        return filtered;
+    }
+
+    /**
+     * @return The total number of cluster-internal HTTP requests performed.
+     */
+    public Long getHttpRequests() {
+        return httpRequests;
+    }
+
+    /**
+     * @return The total number of documents that matched the search condition if the query’s final top-level LIMIT
+     * operation were not present. This attribute may only be returned if the fullCount option was set when starting the
+     * query and only contains a sensible value if the query contains a LIMIT operation on the top level.
+     */
+    public Long getFullCount() {
+        return fullCount;
+    }
+
+    /**
+     * @return The query execution time (wall-clock time) in seconds.
+     */
+    public Double getExecutionTime() {
+        return executionTime;
+    }
+
+    /**
+     * @return The maximum memory usage of the query while it was running. In a cluster, the memory accounting is done
+     * per shard, and the memory usage reported is the peak memory usage value from the individual shards. Note that to
+     * keep things lightweight, the per-query memory usage is tracked on a relatively high level, not including any
+     * memory allocator overhead nor any memory used for temporary results calculations (e.g. memory
+     * allocated/deallocated inside AQL expressions and function calls).
+     */
+    public Long getPeakMemoryUsage() {
+        return peakMemoryUsage;
+    }
+
+    /**
+     * @return The number of real document lookups caused by late materialization as well as `IndexNode`s that had to
+     * load document attributes not covered by the index. This is how many documents had to be fetched from storage
+     * after an index scan that initially covered the attribute access for these documents.
+     */
+    public Integer getDocumentLookups() {
+        return documentLookups;
+    }
+
+    /**
+     * @return The total number of intermediate commits the query has performed. This number can only be greater than
+     * zero for data-modification queries that perform modifications beyond the `--rocksdb.intermediate-commit-count`
+     * or `--rocksdb.intermediate-commit-size` thresholds. In a cluster, the intermediate commits are tracked per
+     * DB-Server that participates in the query and are summed up in the end.
+     */
+    public Integer getIntermediateCommits() {
+        return intermediateCommits;
+    }
+
+    /**
+     * @return The number of seek calls done by RocksDB iterators for merge joins (`JoinNode` in the execution plan).
+     */
+    public Integer getSeeks() {
+        return seeks;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof CursorStats)) return false;
+        CursorStats that = (CursorStats) o;
+        return Objects.equals(properties, that.properties) && Objects.equals(writesExecuted, that.writesExecuted) && Objects.equals(writesIgnored, that.writesIgnored) && Objects.equals(scannedFull, that.scannedFull) && Objects.equals(scannedIndex, that.scannedIndex) && Objects.equals(cursorsCreated, that.cursorsCreated) && Objects.equals(cursorsRearmed, that.cursorsRearmed) && Objects.equals(cacheHits, that.cacheHits) && Objects.equals(cacheMisses, that.cacheMisses) && Objects.equals(filtered, that.filtered) && Objects.equals(httpRequests, that.httpRequests) && Objects.equals(fullCount, that.fullCount) && Objects.equals(executionTime, that.executionTime) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && Objects.equals(documentLookups, that.documentLookups) && Objects.equals(intermediateCommits, that.intermediateCommits) && Objects.equals(seeks, that.seeks);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(properties, writesExecuted, writesIgnored, scannedFull, scannedIndex, cursorsCreated, cursorsRearmed, cacheHits, cacheMisses, filtered, httpRequests, fullCount, executionTime, peakMemoryUsage, documentLookups, intermediateCommits, seeks);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/CursorWarning.java b/core/src/main/java/com/arangodb/entity/CursorWarning.java
new file mode 100644
index 000000000..96d541efe
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/CursorWarning.java
@@ -0,0 +1,29 @@
+package com.arangodb.entity;
+
+import java.util.Objects;
+
+public final class CursorWarning {
+
+    private Integer code;
+    private String message;
+
+    public Integer getCode() {
+        return code;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof CursorWarning)) return false;
+        CursorWarning that = (CursorWarning) o;
+        return Objects.equals(code, that.code) && Objects.equals(message, that.message);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(code, message);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DatabaseEntity.java b/core/src/main/java/com/arangodb/entity/DatabaseEntity.java
new file mode 100644
index 000000000..73df87062
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DatabaseEntity.java
@@ -0,0 +1,109 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class DatabaseEntity {
+
+    private String id;
+    private String name;
+    private String path;
+    private Boolean isSystem;
+    private ReplicationFactor replicationFactor;
+    private Integer writeConcern;
+    private String sharding;
+
+    public DatabaseEntity() {
+        super();
+    }
+
+    /**
+     * @return the id of the database
+     */
+    public String getId() {
+        return id;
+    }
+
+    /**
+     * @return the name of the database
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * @return the filesystem path of the database
+     */
+    public String getPath() {
+        return path;
+    }
+
+    /**
+     * @return whether or not the database is the _system database
+     */
+    public Boolean getIsSystem() {
+        return isSystem;
+    }
+
+    /**
+     * @return the default replication factor for collections in this database
+     * @since ArangoDB 3.6.0
+     */
+    public ReplicationFactor getReplicationFactor() {
+        return replicationFactor;
+    }
+
+    /**
+     * Default write concern for new collections created in this database. It determines how many copies of each shard
+     * are required to be in sync on the different DBServers. If there are less then these many copies in the cluster a
+     * shard will refuse to write. Writes to shards with enough up-to-date copies will succeed at the same time however.
+     * The value of writeConcern can not be larger than replicationFactor. (cluster only)
+     *
+     * @since ArangoDB 3.6.0
+     */
+    public Integer getWriteConcern() {
+        return writeConcern;
+    }
+
+    /**
+     * @return information about the default sharding method for collections created in this database
+     * @since ArangoDB 3.6.0
+     */
+    public String getSharding() {
+        return sharding;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DatabaseEntity)) return false;
+        DatabaseEntity that = (DatabaseEntity) o;
+        return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(path, that.path) && Objects.equals(isSystem, that.isSystem) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(sharding, that.sharding);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, name, path, isSystem, replicationFactor, writeConcern, sharding);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java b/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java
new file mode 100644
index 000000000..c5329e95c
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java
@@ -0,0 +1,76 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import com.arangodb.internal.serde.UserData;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class DocumentCreateEntity<T> extends DocumentEntity {
+
+    private T newDocument;
+    private T oldDocument;
+
+    public DocumentCreateEntity() {
+        super();
+    }
+
+    /**
+     * @return If the query parameter returnNew is true, then the complete new document is returned.
+     */
+    public T getNew() {
+        return newDocument;
+    }
+
+    @UserData
+    public void setNew(final T newDocument) {
+        this.newDocument = newDocument;
+    }
+
+    /**
+     * @return If the query parameter returnOld is true, then the complete previous revision of the document is
+     * returned.
+     */
+    public T getOld() {
+        return oldDocument;
+    }
+
+    @UserData
+    public void setOld(final T oldDocument) {
+        this.oldDocument = oldDocument;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DocumentCreateEntity)) return false;
+        if (!super.equals(o)) return false;
+        DocumentCreateEntity<?> that = (DocumentCreateEntity<?>) o;
+        return Objects.equals(newDocument, that.newDocument) && Objects.equals(oldDocument, that.oldDocument);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), newDocument, oldDocument);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java b/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java
new file mode 100644
index 000000000..41674fdbe
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java
@@ -0,0 +1,63 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import com.arangodb.internal.serde.UserData;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class DocumentDeleteEntity<T> extends DocumentEntity {
+
+    private T oldDocument;
+
+    public DocumentDeleteEntity() {
+        super();
+    }
+
+    /**
+     * @return If the query parameter returnOld is true, then the complete previous revision of the document is
+     * returned.
+     */
+    public T getOld() {
+        return oldDocument;
+    }
+
+    @UserData
+    public void setOld(final T oldDocument) {
+        this.oldDocument = oldDocument;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DocumentDeleteEntity)) return false;
+        if (!super.equals(o)) return false;
+        DocumentDeleteEntity<?> that = (DocumentDeleteEntity<?>) o;
+        return Objects.equals(oldDocument, that.oldDocument);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), oldDocument);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DocumentEntity.java b/core/src/main/java/com/arangodb/entity/DocumentEntity.java
new file mode 100644
index 000000000..c0f82bd27
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DocumentEntity.java
@@ -0,0 +1,72 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public class DocumentEntity {
+
+    @JsonProperty("_key")
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    private String key;
+
+    @JsonProperty("_id")
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    private String id;
+
+    @JsonProperty("_rev")
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    private String rev;
+
+    public DocumentEntity() {
+        super();
+    }
+
+    public String getKey() {
+        return key;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public String getRev() {
+        return rev;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DocumentEntity)) return false;
+        DocumentEntity that = (DocumentEntity) o;
+        return Objects.equals(key, that.key) && Objects.equals(id, that.id) && Objects.equals(rev, that.rev);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(key, id, rev);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java b/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java
new file mode 100644
index 000000000..eb7d48f18
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java
@@ -0,0 +1,123 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class DocumentImportEntity {
+
+    private Integer created;
+    private Integer errors;
+    private Integer empty;
+    private Integer updated;
+    private Integer ignored;
+    private Collection<String> details;
+
+    public DocumentImportEntity() {
+        super();
+        details = new ArrayList<>();
+    }
+
+    /**
+     * @return number of documents imported.
+     */
+    public Integer getCreated() {
+        return created;
+    }
+
+    public void setCreated(final Integer created) {
+        this.created = created;
+    }
+
+    /**
+     * @return number of documents that were not imported due to an error.
+     */
+    public Integer getErrors() {
+        return errors;
+    }
+
+    public void setErrors(final Integer errors) {
+        this.errors = errors;
+    }
+
+    /**
+     * @return number of empty lines found in the input (will only contain a value greater zero for types documents or
+     * auto).
+     */
+    public Integer getEmpty() {
+        return empty;
+    }
+
+    public void setEmpty(final Integer empty) {
+        this.empty = empty;
+    }
+
+    /**
+     * @return number of updated/replaced documents (in case onDuplicate was set to either update or replace).
+     */
+    public Integer getUpdated() {
+        return updated;
+    }
+
+    public void setUpdated(final Integer updated) {
+        this.updated = updated;
+    }
+
+    /**
+     * @return number of failed but ignored insert operations (in case onDuplicate was set to ignore).
+     */
+    public Integer getIgnored() {
+        return ignored;
+    }
+
+    public void setIgnored(final Integer ignored) {
+        this.ignored = ignored;
+    }
+
+    /**
+     * @return if query parameter details is set to true, the result contain details with more detailed information
+     * about which documents could not be inserted.
+     */
+    public Collection<String> getDetails() {
+        return details;
+    }
+
+    public void setDetails(final Collection<String> details) {
+        this.details = details;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DocumentImportEntity)) return false;
+        DocumentImportEntity that = (DocumentImportEntity) o;
+        return Objects.equals(created, that.created) && Objects.equals(errors, that.errors) && Objects.equals(empty, that.empty) && Objects.equals(updated, that.updated) && Objects.equals(ignored, that.ignored) && Objects.equals(details, that.details);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(created, errors, empty, updated, ignored, details);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java b/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java
new file mode 100644
index 000000000..2a0a3b6c2
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java
@@ -0,0 +1,83 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import com.arangodb.internal.serde.UserData;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class DocumentUpdateEntity<T> extends DocumentEntity {
+
+    @JsonProperty("_oldRev")
+    private String oldRev;
+    private T newDocument;
+    private T oldDocument;
+
+    public DocumentUpdateEntity() {
+        super();
+    }
+
+    public String getOldRev() {
+        return oldRev;
+    }
+
+    /**
+     * @return If the query parameter returnNew is true, then the complete new document is returned.
+     */
+    public T getNew() {
+        return newDocument;
+    }
+
+    @UserData
+    public void setNew(final T newDocument) {
+        this.newDocument = newDocument;
+    }
+
+    /**
+     * @return If the query parameter returnOld is true, then the complete previous revision of the document is
+     * returned.
+     */
+    public T getOld() {
+        return oldDocument;
+    }
+
+    @UserData
+    public void setOld(final T oldDocument) {
+        this.oldDocument = oldDocument;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof DocumentUpdateEntity)) return false;
+        if (!super.equals(o)) return false;
+        DocumentUpdateEntity<?> that = (DocumentUpdateEntity<?>) o;
+        return Objects.equals(oldRev, that.oldRev) && Objects.equals(newDocument, that.newDocument) && Objects.equals(oldDocument, that.oldDocument);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), oldRev, newDocument, oldDocument);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/EdgeDefinition.java b/core/src/main/java/com/arangodb/entity/EdgeDefinition.java
new file mode 100644
index 000000000..b89f67417
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/EdgeDefinition.java
@@ -0,0 +1,115 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class EdgeDefinition {
+    private String collection;
+    private Collection<String> from;
+    private Collection<String> to;
+    private final Options options = new Options();
+
+    public String getCollection() {
+        return collection;
+    }
+
+    public EdgeDefinition collection(final String collection) {
+        this.collection = collection;
+        return this;
+    }
+
+    public Collection<String> getFrom() {
+        return from;
+    }
+
+    public EdgeDefinition from(final String... from) {
+        this.from = Arrays.asList(from);
+        return this;
+    }
+
+    public Collection<String> getTo() {
+        return to;
+    }
+
+    public EdgeDefinition to(final String... to) {
+        this.to = Arrays.asList(to);
+        return this;
+    }
+
+    public Collection<String> getSatellites() {
+        return options.satellites;
+    }
+
+    public Options getOptions() {
+        return options;
+    }
+
+    /**
+     * @param satellites collection names that will be used to create SatelliteCollections
+     *                   for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only). Each array element
+     *                   must be a valid collection name. The collection type cannot be modified later.
+     * @return this
+     * @since ArangoDB 3.9.0
+     */
+    public EdgeDefinition satellites(final String... satellites) {
+        options.satellites = Arrays.asList(satellites);
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof EdgeDefinition)) return false;
+        EdgeDefinition that = (EdgeDefinition) o;
+        return Objects.equals(collection, that.collection) && Objects.equals(from, that.from) && Objects.equals(to, that.to) && Objects.equals(options, that.options);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(collection, from, to, options);
+    }
+
+    public static final class Options {
+        private Collection<String> satellites = Collections.emptyList();
+
+        public Collection<String> getSatellites() {
+            return satellites;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (!(o instanceof Options)) return false;
+            Options options = (Options) o;
+            return Objects.equals(satellites, options.satellites);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hashCode(satellites);
+        }
+    }
+}
diff --git a/src/main/java/com/arangodb/entity/EdgeEntity.java b/core/src/main/java/com/arangodb/entity/EdgeEntity.java
similarity index 86%
rename from src/main/java/com/arangodb/entity/EdgeEntity.java
rename to core/src/main/java/com/arangodb/entity/EdgeEntity.java
index 57d0c35b7..39d9017db 100644
--- a/src/main/java/com/arangodb/entity/EdgeEntity.java
+++ b/core/src/main/java/com/arangodb/entity/EdgeEntity.java
@@ -1,33 +1,32 @@
-/*
- * DISCLAIMER
- *
- * Copyright 2016 ArangoDB GmbH, Cologne, Germany
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * Copyright holder is ArangoDB GmbH, Cologne, Germany
- */
-
-package com.arangodb.entity;
-
-/**
- * @author Mark Vollmary
- *
- */
-public class EdgeEntity extends DocumentEntity {
-
-    public EdgeEntity() {
-        super();
-    }
-
-}
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class EdgeEntity extends DocumentEntity {
+
+    public EdgeEntity() {
+        super();
+    }
+
+}
diff --git a/src/main/java/com/arangodb/entity/PathEntity.java b/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java
similarity index 52%
rename from src/main/java/com/arangodb/entity/PathEntity.java
rename to core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java
index 3a4a95483..15666c91d 100644
--- a/src/main/java/com/arangodb/entity/PathEntity.java
+++ b/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java
@@ -1,56 +1,55 @@
-/*
- * DISCLAIMER
- *
- * Copyright 2016 ArangoDB GmbH, Cologne, Germany
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class PathEntity { - - private Collection edges; - private Collection vertices; - - public PathEntity() { - super(); - } - - public Collection getEdges() { - return edges; - } - - public void setEdges(final Collection edges) { - this.edges = edges; - } - - public Collection getVertices() { - return vertices; - } - - public void setVertices(final Collection vertices) { - this.vertices = vertices; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class EdgeUpdateEntity extends DocumentEntity { + + @JsonProperty("_oldRev") + private String oldRev; + + public EdgeUpdateEntity() { + super(); + } + + public String getOldRev() { + return oldRev; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof EdgeUpdateEntity)) return false; + if (!super.equals(o)) return false; + EdgeUpdateEntity that = (EdgeUpdateEntity) o; + return Objects.equals(oldRev, that.oldRev); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldRev); + } +} diff --git a/core/src/main/java/com/arangodb/entity/ErrorEntity.java b/core/src/main/java/com/arangodb/entity/ErrorEntity.java new file mode 100644 index 000000000..534a70ecf --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ErrorEntity.java @@ -0,0 +1,81 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.io.Serializable; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class ErrorEntity implements Serializable { + + private static final long serialVersionUID = -5918898261563691261L; + + private String errorMessage; + private String exception; + private int code; + private int errorNum; + + public ErrorEntity() { + super(); + } + + /** + * @return a descriptive error message + */ + public String getErrorMessage() { + return errorMessage; + } + + /** + * @return the exception message, passed when transaction fails + */ + public String getException() { + return exception; + } + + /** + * @return the status code + */ + public int getCode() { + return code; + } + + /** + * @return the server error number + */ + public int getErrorNum() { + return errorNum; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ErrorEntity)) return false; + ErrorEntity that = (ErrorEntity) o; + return code == that.code && errorNum == that.errorNum && Objects.equals(errorMessage, that.errorMessage) && Objects.equals(exception, that.exception); + } + + @Override + public int hashCode() { + return Objects.hash(errorMessage, exception, code, errorNum); + } +} diff --git a/core/src/main/java/com/arangodb/entity/GraphEntity.java b/core/src/main/java/com/arangodb/entity/GraphEntity.java new file mode 100644 index 000000000..9a068c566 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/GraphEntity.java @@ -0,0 +1,145 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Collection; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class GraphEntity { + + private String name; + private Collection edgeDefinitions; + private Collection orphanCollections; + private Integer numberOfShards; + private String _id; + private String _rev; + private ReplicationFactor replicationFactor; + private Integer writeConcern; + private Boolean isSmart; + private Boolean isDisjoint; + private String smartGraphAttribute; + private Boolean isSatellite; + + /** + * @return Name of the graph. + */ + public String getName() { + return name; + } + + /** + * @return An array of definitions for the relations of the graph. + */ + public Collection getEdgeDefinitions() { + return edgeDefinitions; + } + + /** + * @return An array of additional vertex collections. Documents within these collections do not have edges within + * this graph. + */ + public Collection getOrphanCollections() { + return orphanCollections; + } + + /** + * @return Number of shards created for every new collection in the graph. + */ + public Integer getNumberOfShards() { + return numberOfShards; + } + + /** + * @return The internal id value of this graph. + */ + public String getId() { + return _id; + } + + /** + * @return The revision of this graph. Can be used to make sure to not override concurrent modifications to this + * graph. 
+ */ + public String getRev() { + return _rev; + } + + /** + * @return The replication factor used for every new collection in the graph. Can also be satellite for a SmartGraph + * (Enterprise Edition only). + */ + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + /** + * @return Default write concern for new collections in the graph. It determines how many copies of each shard are + * required to be in sync on the different DB-Servers. If there are less then these many copies in the cluster a + * shard will refuse to write. Writes to shards with enough up-to-date copies will succeed at the same time however. + * The value of writeConcern can not be larger than replicationFactor. (cluster only) + */ + public Integer getWriteConcern() { + return writeConcern; + } + + /** + * @return Whether the graph is a SmartGraph (Enterprise Edition only). + */ + public Boolean getIsSmart() { + return isSmart; + } + + /** + * @return Whether the graph is a Disjoint SmartGraph (Enterprise Edition only). + */ + public Boolean getIsDisjoint() { + return isDisjoint; + } + + /** + * @return Name of the sharding attribute in the SmartGraph case (Enterprise Edition only). + */ + public String getSmartGraphAttribute() { + return smartGraphAttribute; + } + + /** + * @return Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not. 
+ */ + public Boolean getIsSatellite() { + return isSatellite; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof GraphEntity)) return false; + GraphEntity that = (GraphEntity) o; + return Objects.equals(name, that.name) && Objects.equals(edgeDefinitions, that.edgeDefinitions) && Objects.equals(orphanCollections, that.orphanCollections) && Objects.equals(numberOfShards, that.numberOfShards) && Objects.equals(_id, that._id) && Objects.equals(_rev, that._rev) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(isSmart, that.isSmart) && Objects.equals(isDisjoint, that.isDisjoint) && Objects.equals(smartGraphAttribute, that.smartGraphAttribute) && Objects.equals(isSatellite, that.isSatellite); + } + + @Override + public int hashCode() { + return Objects.hash(name, edgeDefinitions, orphanCollections, numberOfShards, _id, _rev, replicationFactor, writeConcern, isSmart, isDisjoint, smartGraphAttribute, isSatellite); + } +} diff --git a/core/src/main/java/com/arangodb/entity/IndexEntity.java b/core/src/main/java/com/arangodb/entity/IndexEntity.java new file mode 100644 index 000000000..cbf1140bb --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/IndexEntity.java @@ -0,0 +1,149 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.model.MDIFieldValueTypes; + +import java.util.Collection; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class IndexEntity { + + private String id; + private String name; + private IndexType type; + private Collection fields; + private Double selectivityEstimate; + private Boolean unique; + private Boolean sparse; + private Integer minLength; + private Boolean isNewlyCreated; + private Boolean geoJson; + private Boolean constraint; + private Boolean deduplicate; + private Integer expireAfter; + private Boolean inBackground; + private Boolean estimates; + private Boolean cacheEnabled; + private Collection storedValues; + private Boolean legacyPolygons; + private MDIFieldValueTypes fieldValueTypes; + private Collection prefixFields; + + public IndexEntity() { + super(); + } + + public String getId() { + return id; + } + + public Boolean getInBackground() { + return inBackground; + } + + public String getName() { + return name; + } + + public IndexType getType() { + return type; + } + + public Collection getFields() { + return fields; + } + + public Double getSelectivityEstimate() { + return selectivityEstimate; + } + + public Boolean getUnique() { + return unique; + } + + public Boolean getSparse() { + return sparse; + } + + public Integer getMinLength() { + return minLength; + } + + public Boolean getIsNewlyCreated() { + return isNewlyCreated; + } + + public Boolean getGeoJson() { + return geoJson; + } + + public Integer getExpireAfter() { + return expireAfter; + } + + public Boolean getConstraint() { + return constraint; + } + + public Boolean getDeduplicate() { + return deduplicate; + } + + public Boolean getEstimates() { + return estimates; + } + + public Boolean getCacheEnabled() { + return cacheEnabled; + } + + public Collection getStoredValues() { + return storedValues; + } + + public Boolean 
getLegacyPolygons() { + return legacyPolygons; + } + + public MDIFieldValueTypes getFieldValueTypes() { + return fieldValueTypes; + } + + public Collection getPrefixFields() { + return prefixFields; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof IndexEntity)) return false; + IndexEntity that = (IndexEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && type == that.type && Objects.equals(fields, that.fields) && Objects.equals(selectivityEstimate, that.selectivityEstimate) && Objects.equals(unique, that.unique) && Objects.equals(sparse, that.sparse) && Objects.equals(minLength, that.minLength) && Objects.equals(isNewlyCreated, that.isNewlyCreated) && Objects.equals(geoJson, that.geoJson) && Objects.equals(constraint, that.constraint) && Objects.equals(deduplicate, that.deduplicate) && Objects.equals(expireAfter, that.expireAfter) && Objects.equals(inBackground, that.inBackground) && Objects.equals(estimates, that.estimates) && Objects.equals(cacheEnabled, that.cacheEnabled) && Objects.equals(storedValues, that.storedValues) && Objects.equals(legacyPolygons, that.legacyPolygons) && fieldValueTypes == that.fieldValueTypes && Objects.equals(prefixFields, that.prefixFields); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, type, fields, selectivityEstimate, unique, sparse, minLength, isNewlyCreated, geoJson, constraint, deduplicate, expireAfter, inBackground, estimates, cacheEnabled, storedValues, legacyPolygons, fieldValueTypes, prefixFields); + } +} diff --git a/core/src/main/java/com/arangodb/entity/IndexType.java b/core/src/main/java/com/arangodb/entity/IndexType.java new file mode 100644 index 000000000..21ca79491 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/IndexType.java @@ -0,0 +1,76 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Mark Vollmary + * @author Heiko Kernbach + */ +public enum IndexType { + + primary, + + hash, + + skiplist, + + persistent, + + geo, + + geo1, + + geo2, + + /** + * @deprecated since ArangoDB 3.10, use ArangoSearch or Inverted indexes instead. + */ + @Deprecated + fulltext, + + edge, + + ttl, + + zkd, + + /** + * Multi Dimensional Index + * @see Ref Doc + * @since ArangoDB 3.12 + */ + mdi, + + /** + * Multi Dimensional Prefixed Index + * @see Ref Doc + * @since ArangoDB 3.12 + */ + @JsonProperty("mdi-prefixed") + mdiPrefixed, + + /** + * @since ArangoDB 3.10 + */ + inverted +} diff --git a/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java b/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java new file mode 100644 index 000000000..102d96fbe --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java @@ -0,0 +1,179 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.arangodb.entity.arangosearch.AnalyzerFeature; +import com.arangodb.entity.arangosearch.ConsolidationPolicy; +import com.arangodb.entity.arangosearch.StoredValue; + +import java.util.Collection; +import java.util.Objects; +import java.util.Set; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class InvertedIndexEntity { + + private String id; + private Boolean isNewlyCreated; + private Boolean unique; + private Boolean sparse; + private Long version; + private Integer code; + private IndexType type; + private String name; + private Collection fields; + private Boolean searchField; + private Collection storedValues; + private Collection optimizeTopK; + private InvertedIndexPrimarySort primarySort; + private String analyzer; + private Set features; + private Boolean includeAllFields; + private Boolean trackListPositions; + private Long cleanupIntervalStep; + private Long commitIntervalMsec; + private Long consolidationIntervalMsec; + private ConsolidationPolicy consolidationPolicy; + private Long writebufferIdle; + private Long writebufferActive; + private Long writebufferSizeMax; + private Boolean cache; + private Boolean primaryKeyCache; + + public String getId() { + return id; + } + + public Boolean getIsNewlyCreated() { + return isNewlyCreated; + } + + public Boolean getUnique() { + return unique; + } + + public Boolean getSparse() { + return sparse; + } + + public Long getVersion() { + return version; + } + 
+ public Integer getCode() { + return code; + } + + public IndexType getType() { + return type; + } + + public String getName() { + return name; + } + + public Collection getFields() { + return fields; + } + + public Boolean getSearchField() { + return searchField; + } + + public Collection getStoredValues() { + return storedValues; + } + + public Collection getOptimizeTopK() { + return optimizeTopK; + } + + public InvertedIndexPrimarySort getPrimarySort() { + return primarySort; + } + + public String getAnalyzer() { + return analyzer; + } + + public Set getFeatures() { + return features; + } + + public Boolean getIncludeAllFields() { + return includeAllFields; + } + + public Boolean getTrackListPositions() { + return trackListPositions; + } + + public Long getCleanupIntervalStep() { + return cleanupIntervalStep; + } + + public Long getCommitIntervalMsec() { + return commitIntervalMsec; + } + + public Long getConsolidationIntervalMsec() { + return consolidationIntervalMsec; + } + + public ConsolidationPolicy getConsolidationPolicy() { + return consolidationPolicy; + } + + public Long getWritebufferIdle() { + return writebufferIdle; + } + + public Long getWritebufferActive() { + return writebufferActive; + } + + public Long getWritebufferSizeMax() { + return writebufferSizeMax; + } + + public Boolean getCache() { + return cache; + } + + public Boolean getPrimaryKeyCache() { + return primaryKeyCache; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof InvertedIndexEntity)) return false; + InvertedIndexEntity that = (InvertedIndexEntity) o; + return Objects.equals(id, that.id) && Objects.equals(isNewlyCreated, that.isNewlyCreated) && Objects.equals(unique, that.unique) && Objects.equals(sparse, that.sparse) && Objects.equals(version, that.version) && Objects.equals(code, that.code) && type == that.type && Objects.equals(name, that.name) && Objects.equals(fields, that.fields) && Objects.equals(searchField, that.searchField) && 
Objects.equals(storedValues, that.storedValues) && Objects.equals(optimizeTopK, that.optimizeTopK) && Objects.equals(primarySort, that.primarySort) && Objects.equals(analyzer, that.analyzer) && Objects.equals(features, that.features) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(trackListPositions, that.trackListPositions) && Objects.equals(cleanupIntervalStep, that.cleanupIntervalStep) && Objects.equals(commitIntervalMsec, that.commitIntervalMsec) && Objects.equals(consolidationIntervalMsec, that.consolidationIntervalMsec) && Objects.equals(consolidationPolicy, that.consolidationPolicy) && Objects.equals(writebufferIdle, that.writebufferIdle) && Objects.equals(writebufferActive, that.writebufferActive) && Objects.equals(writebufferSizeMax, that.writebufferSizeMax) && Objects.equals(cache, that.cache) && Objects.equals(primaryKeyCache, that.primaryKeyCache); + } + + @Override + public int hashCode() { + return Objects.hash(id, isNewlyCreated, unique, sparse, version, code, type, name, fields, searchField, storedValues, optimizeTopK, primarySort, analyzer, features, includeAllFields, trackListPositions, cleanupIntervalStep, commitIntervalMsec, consolidationIntervalMsec, consolidationPolicy, writebufferIdle, writebufferActive, writebufferSizeMax, cache, primaryKeyCache); + } +} diff --git a/core/src/main/java/com/arangodb/entity/InvertedIndexField.java b/core/src/main/java/com/arangodb/entity/InvertedIndexField.java new file mode 100644 index 000000000..5c2494191 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/InvertedIndexField.java @@ -0,0 +1,166 @@ +package com.arangodb.entity; + +import com.arangodb.entity.arangosearch.AnalyzerFeature; + +import java.util.*; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class InvertedIndexField { + private String name; + private String analyzer; + private Boolean includeAllFields; + private Boolean searchField; + private Boolean trackListPositions; + 
private Boolean cache; + private final Set features = new HashSet<>(); + private Collection nested; + + public String getName() { + return name; + } + + /** + * @param name An attribute path. The . character denotes sub-attributes. + * @return this + */ + public InvertedIndexField name(String name) { + this.name = name; + return this; + } + + public String getAnalyzer() { + return analyzer; + } + + /** + * @param analyzer The name of an Analyzer to use for this field. Default: the value defined by the top-level + * analyzer option, or if not set, the default identity Analyzer. + * @return this + */ + public InvertedIndexField analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + public Boolean getIncludeAllFields() { + return includeAllFields; + } + + /** + * @param includeAllFields This option only applies if you use the inverted index in a search-alias Views. If set to + * true, then all sub-attributes of this field are indexed, excluding any sub-attributes + * that are configured separately by other elements in the fields array (and their + * sub-attributes). The analyzer and features properties apply to the sub-attributes. If set + * to false, then sub-attributes are ignored. The default value is defined by the top-level + * includeAllFields option, or false if not set. + * @return this + */ + public InvertedIndexField includeAllFields(Boolean includeAllFields) { + this.includeAllFields = includeAllFields; + return this; + } + + public Boolean getSearchField() { + return searchField; + } + + /** + * @param searchField This option only applies if you use the inverted index in a search-alias Views. You can set + * the option to true to get the same behavior as with arangosearch Views regarding the indexing + * of array values for this field. If enabled, both, array and primitive values (strings, + * numbers, etc.) are accepted. Every element of an array is indexed according to the + * trackListPositions option. 
If set to false, it depends on the attribute path. If it explicitly + * expands an array ([*]), then the elements are indexed separately. Otherwise, the array is + * indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an + * array expansion if searchField is enabled. Default: the value defined by the top-level + * searchField option, or false if not set. + * @return this + */ + public InvertedIndexField searchField(Boolean searchField) { + this.searchField = searchField; + return this; + } + + public Boolean getTrackListPositions() { + return trackListPositions; + } + + /** + * @param trackListPositions This option only applies if you use the inverted index in a search-alias Views. If set + * to true, then track the value position in arrays for array values. For example, when + * querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to + * specify the array element, e.g. doc.attr[1] == "valueY". If set to false, all values in + * an array are treated as equal alternatives. You don’t specify an array element in + * queries, e.g. doc.attr == "valueY", and all elements are searched for a match. Default: + * the value defined by the top-level trackListPositions option, or false if not set. + * @return this + */ + public InvertedIndexField trackListPositions(Boolean trackListPositions) { + this.trackListPositions = trackListPositions; + return this; + } + + public Boolean getCache() { + return cache; + } + + /** + * @param cache Enable this option to always cache the field normalization values in memory for this specific field. + * This can improve the performance of scoring and ranking queries. Otherwise, these values are + * memory-mapped and it is up to the operating system to load them from disk into memory and to evict + * them from memory. 
(Enterprise Edition only) + * @return this + * @since ArangoDB 3.10.2 + */ + public InvertedIndexField cache(Boolean cache) { + this.cache = cache; + return this; + } + + public Set getFeatures() { + return features; + } + + /** + * @param features A list of Analyzer features to use for this field. They define what features are enabled for the + * analyzer. + * @return this + */ + public InvertedIndexField features(AnalyzerFeature... features) { + Collections.addAll(this.features, features); + return this; + } + + public Collection getNested() { + return nested; + } + + /** + * @param nested Index the specified sub-objects that are stored in an array. Other than with the fields property, + * the values get indexed in a way that lets you query for co-occurring values. For example, you can + * search the sub-objects and all the conditions need to be met by a single sub-object instead of + * across all of them. This property is available in the Enterprise Edition only. + * @return this + */ + public InvertedIndexField nested(InvertedIndexField... 
nested) { + if (this.nested == null) this.nested = new ArrayList<>(); + Collections.addAll(this.nested, nested); + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InvertedIndexField that = (InvertedIndexField) o; + return Objects.equals(name, that.name) && Objects.equals(analyzer, that.analyzer) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(searchField, that.searchField) && Objects.equals(trackListPositions, that.trackListPositions) && Objects.equals(cache, that.cache) && Objects.equals(features, that.features) && Objects.equals(nested, that.nested); + } + + @Override + public int hashCode() { + return Objects.hash(name, analyzer, includeAllFields, searchField, trackListPositions, cache, features, nested); + } +} diff --git a/core/src/main/java/com/arangodb/entity/InvertedIndexPrimarySort.java b/core/src/main/java/com/arangodb/entity/InvertedIndexPrimarySort.java new file mode 100644 index 000000000..776130882 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/InvertedIndexPrimarySort.java @@ -0,0 +1,119 @@ +package com.arangodb.entity; + +import com.arangodb.entity.arangosearch.ArangoSearchCompression; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class InvertedIndexPrimarySort { + private final List fields = new ArrayList<>(); + private ArangoSearchCompression compression; + private Boolean cache; + + public List getFields() { + return fields; + } + + /** + * @param fields An array of the fields to sort the index by and the direction to sort each field in. + * @return this + */ + public InvertedIndexPrimarySort fields(Field... 
fields) { + Collections.addAll(this.fields, fields); + return this; + } + + public ArangoSearchCompression getCompression() { + return compression; + } + + /** + * @param compression Defines how to compress the primary sort data. + * @return this + */ + public InvertedIndexPrimarySort compression(ArangoSearchCompression compression) { + this.compression = compression; + return this; + } + + public Boolean getCache() { + return cache; + } + + /** + * @param cache If you enable this option, then the primary sort columns are always cached in memory. This can + * improve the performance of queries that utilize the primary sort order. Otherwise, these values are + * memory-mapped and it is up to the operating system to load them from disk into memory and to evict + * them from memory (Enterprise Edition only). + * @return this + * @since ArangoDB 3.10.2 + */ + public InvertedIndexPrimarySort cache(Boolean cache) { + this.cache = cache; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InvertedIndexPrimarySort that = (InvertedIndexPrimarySort) o; + return Objects.equals(fields, that.fields) && compression == that.compression && Objects.equals(cache, that.cache); + } + + @Override + public int hashCode() { + return Objects.hash(fields, compression, cache); + } + + public static class Field { + private final String field; + private final Direction direction; + + /** + * @param field An attribute path. The . character denotes sub-attributes. + * @param direction The sorting direction. 
+ */ + @JsonCreator + public Field(@JsonProperty("field") String field, @JsonProperty("direction") Direction direction) { + this.field = field; + this.direction = direction; + } + + public String getField() { + return field; + } + + public Direction getDirection() { + return direction; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Field field1 = (Field) o; + return Objects.equals(field, field1.field) && direction == field1.direction; + } + + @Override + public int hashCode() { + return Objects.hash(field, direction); + } + + public enum Direction { + asc, + desc + } + + } + +} diff --git a/core/src/main/java/com/arangodb/entity/KeyOptions.java b/core/src/main/java/com/arangodb/entity/KeyOptions.java new file mode 100644 index 000000000..dbeb87d8c --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/KeyOptions.java @@ -0,0 +1,90 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class KeyOptions { + + private Boolean allowUserKeys; + private KeyType type; + private Integer increment; + private Integer offset; + + public KeyOptions() { + super(); + } + + public KeyOptions(final Boolean allowUserKeys, final KeyType type, final Integer increment, final Integer offset) { + super(); + this.allowUserKeys = allowUserKeys; + this.type = type; + this.increment = increment; + this.offset = offset; + } + + public Boolean getAllowUserKeys() { + return allowUserKeys; + } + + public void setAllowUserKeys(final Boolean allowUserKeys) { + this.allowUserKeys = allowUserKeys; + } + + public KeyType getType() { + return type; + } + + public void setType(final KeyType type) { + this.type = type; + } + + public Integer getIncrement() { + return increment; + } + + public void setIncrement(final Integer increment) { + this.increment = increment; + } + + public Integer getOffset() { + return offset; + } + + public void setOffset(final Integer offset) { + this.offset = offset; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof KeyOptions)) return false; + KeyOptions that = (KeyOptions) o; + return Objects.equals(allowUserKeys, that.allowUserKeys) && type == that.type && Objects.equals(increment, that.increment) && Objects.equals(offset, that.offset); + } + + @Override + public int hashCode() { + return Objects.hash(allowUserKeys, type, increment, offset); + } +} diff --git a/src/main/java/com/arangodb/entity/KeyType.java b/core/src/main/java/com/arangodb/entity/KeyType.java similarity index 92% rename from src/main/java/com/arangodb/entity/KeyType.java rename to core/src/main/java/com/arangodb/entity/KeyType.java index d7a593726..4ac2da4f3 100644 --- a/src/main/java/com/arangodb/entity/KeyType.java +++ b/core/src/main/java/com/arangodb/entity/KeyType.java @@ -1,29 
+1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum KeyType { - traditional, autoincrement -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum KeyType { + traditional, autoincrement, uuid, padded +} diff --git a/core/src/main/java/com/arangodb/entity/License.java b/core/src/main/java/com/arangodb/entity/License.java new file mode 100644 index 000000000..92803eed4 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/License.java @@ -0,0 +1,36 @@ +/* + * DISCLAIMER + * + * Copyright 2019 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Axel Becker + */ +public enum License { + + @JsonProperty("enterprise") + ENTERPRISE, + + @JsonProperty("community") + COMMUNITY + +} diff --git a/src/main/java/com/arangodb/entity/LoadBalancingStrategy.java b/core/src/main/java/com/arangodb/entity/LoadBalancingStrategy.java similarity index 92% rename from src/main/java/com/arangodb/entity/LoadBalancingStrategy.java rename to core/src/main/java/com/arangodb/entity/LoadBalancingStrategy.java index b75257b0a..525744fb3 100644 --- a/src/main/java/com/arangodb/entity/LoadBalancingStrategy.java +++ b/core/src/main/java/com/arangodb/entity/LoadBalancingStrategy.java @@ -1,29 +1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum LoadBalancingStrategy { - NONE, ROUND_ROBIN, ONE_RANDOM -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum LoadBalancingStrategy { + NONE, ROUND_ROBIN, ONE_RANDOM +} diff --git a/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java b/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java new file mode 100644 index 000000000..9622525b1 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java @@ -0,0 +1,95 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.List; +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.8 + */ +public final class LogEntriesEntity { + + private Long total; + private List messages; + + public Long getTotal() { + return total; + } + + public List getMessages() { + return messages; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof LogEntriesEntity)) return false; + LogEntriesEntity that = (LogEntriesEntity) o; + return Objects.equals(total, that.total) && Objects.equals(messages, that.messages); + } + + @Override + public int hashCode() { + return Objects.hash(total, messages); + } + + public static final class Message { + Long id; + String topic; + String level; + String date; + String message; + + public Long getId() { + return id; + } + + public String getTopic() { + return topic; + } + + public String getLevel() { + return level; + } + + public String getDate() { + return date; + } + + public String getMessage() { + return message; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Message)) return false; + Message message1 = (Message) o; + return Objects.equals(id, message1.id) && Objects.equals(topic, message1.topic) && Objects.equals(level, message1.level) && Objects.equals(date, message1.date) && Objects.equals(message, message1.message); + } + + @Override + public int hashCode() { + return Objects.hash(id, topic, level, date, message); + } + } + +} diff --git a/src/main/java/com/arangodb/entity/LogLevel.java b/core/src/main/java/com/arangodb/entity/LogLevel.java similarity index 63% rename from src/main/java/com/arangodb/entity/LogLevel.java rename to core/src/main/java/com/arangodb/entity/LogLevel.java index ba3de03df..8895bfa7a 100644 --- a/src/main/java/com/arangodb/entity/LogLevel.java +++ b/core/src/main/java/com/arangodb/entity/LogLevel.java @@ -1,50 +1,49 @@ -/* - * DISCLAIMER - * - 
* Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum LogLevel { - - FATAL(0), ERROR(1), WARNING(2), INFO(3), DEBUG(4); - - private final int level; - - private LogLevel(final int level) { - this.level = level; - } - - public int getLevel() { - return level; - } - - public static LogLevel fromLevel(final int level) { - for (final LogLevel logLevel : LogLevel.values()) { - if (logLevel.level == level) { - return logLevel; - } - } - return null; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum LogLevel { + + FATAL(0), ERROR(1), WARNING(2), INFO(3), DEBUG(4); + + private final int level; + + LogLevel(final int level) { + this.level = level; + } + + public static LogLevel fromLevel(final int level) { + for (final LogLevel logLevel : LogLevel.values()) { + if (logLevel.level == level) { + return logLevel; + } + } + return null; + } + + public int getLevel() { + return level; + } + +} diff --git a/core/src/main/java/com/arangodb/entity/LogLevelEntity.java b/core/src/main/java/com/arangodb/entity/LogLevelEntity.java new file mode 100644 index 000000000..a12372749 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/LogLevelEntity.java @@ -0,0 +1,487 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class LogLevelEntity { + + private LogLevel all; + private LogLevel agency; + private LogLevel agencycomm; + private LogLevel agencystore; + private LogLevel backup; + private LogLevel bench; + private LogLevel cluster; + private LogLevel communication; + private LogLevel authentication; + private LogLevel config; + private LogLevel crash; + private LogLevel dump; + private LogLevel engines; + private LogLevel cache; + private LogLevel security; + private LogLevel startup; + private LogLevel trx; + private LogLevel supervision; + private LogLevel threads; + private LogLevel ttl; + private LogLevel ssl; + private LogLevel replication2; + private LogLevel restore; + private LogLevel memory; + private LogLevel validation; + private LogLevel statistics; + private LogLevel v8; + private LogLevel syscall; + private LogLevel libiresearch; + private LogLevel license; + private LogLevel deprecation; + private LogLevel rocksdb; + private LogLevel requests; + @JsonProperty("rep-wal") + private LogLevel repWal; + private LogLevel arangosearch; + private LogLevel views; + @JsonProperty("rep-state") + private LogLevel repState; + private LogLevel authorization; + private LogLevel queries; + private LogLevel aql; + private LogLevel graphs; + private LogLevel maintenance; + private LogLevel development; + private LogLevel replication; + private LogLevel httpclient; + private LogLevel heartbeat; + private LogLevel flush; + private LogLevel general; + + public LogLevelEntity() { + super(); + } + + public LogLevel getAll() { + return all; + } + + public void setAll(LogLevel all) { + this.all = all; + } + + public LogLevel getAgency() { + return agency; + } + + public void setAgency(LogLevel agency) { + this.agency = agency; + } + + public LogLevel 
getAgencycomm() { + return agencycomm; + } + + public void setAgencycomm(LogLevel agencycomm) { + this.agencycomm = agencycomm; + } + + public LogLevel getAgencystore() { + return agencystore; + } + + public void setAgencystore(LogLevel agencystore) { + this.agencystore = agencystore; + } + + public LogLevel getBackup() { + return backup; + } + + public void setBackup(LogLevel backup) { + this.backup = backup; + } + + public LogLevel getBench() { + return bench; + } + + public void setBench(LogLevel bench) { + this.bench = bench; + } + + public LogLevel getCluster() { + return cluster; + } + + public void setCluster(LogLevel cluster) { + this.cluster = cluster; + } + + public LogLevel getCommunication() { + return communication; + } + + public void setCommunication(LogLevel communication) { + this.communication = communication; + } + + public LogLevel getAuthentication() { + return authentication; + } + + public void setAuthentication(LogLevel authentication) { + this.authentication = authentication; + } + + public LogLevel getConfig() { + return config; + } + + public void setConfig(LogLevel config) { + this.config = config; + } + + public LogLevel getCrash() { + return crash; + } + + public void setCrash(LogLevel crash) { + this.crash = crash; + } + + public LogLevel getDump() { + return dump; + } + + public void setDump(LogLevel dump) { + this.dump = dump; + } + + public LogLevel getEngines() { + return engines; + } + + public void setEngines(LogLevel engines) { + this.engines = engines; + } + + public LogLevel getCache() { + return cache; + } + + public void setCache(LogLevel cache) { + this.cache = cache; + } + + public LogLevel getSecurity() { + return security; + } + + public void setSecurity(LogLevel security) { + this.security = security; + } + + public LogLevel getStartup() { + return startup; + } + + public void setStartup(LogLevel startup) { + this.startup = startup; + } + + public LogLevel getTrx() { + return trx; + } + + public void setTrx(LogLevel 
trx) { + this.trx = trx; + } + + public LogLevel getSupervision() { + return supervision; + } + + public void setSupervision(LogLevel supervision) { + this.supervision = supervision; + } + + public LogLevel getThreads() { + return threads; + } + + public void setThreads(LogLevel threads) { + this.threads = threads; + } + + public LogLevel getTtl() { + return ttl; + } + + public void setTtl(LogLevel ttl) { + this.ttl = ttl; + } + + public LogLevel getSsl() { + return ssl; + } + + public void setSsl(LogLevel ssl) { + this.ssl = ssl; + } + + public LogLevel getReplication2() { + return replication2; + } + + public void setReplication2(LogLevel replication2) { + this.replication2 = replication2; + } + + public LogLevel getRestore() { + return restore; + } + + public void setRestore(LogLevel restore) { + this.restore = restore; + } + + public LogLevel getMemory() { + return memory; + } + + public void setMemory(LogLevel memory) { + this.memory = memory; + } + + public LogLevel getValidation() { + return validation; + } + + public void setValidation(LogLevel validation) { + this.validation = validation; + } + + public LogLevel getStatistics() { + return statistics; + } + + public void setStatistics(LogLevel statistics) { + this.statistics = statistics; + } + + public LogLevel getV8() { + return v8; + } + + public void setV8(LogLevel v8) { + this.v8 = v8; + } + + public LogLevel getSyscall() { + return syscall; + } + + public void setSyscall(LogLevel syscall) { + this.syscall = syscall; + } + + public LogLevel getLibiresearch() { + return libiresearch; + } + + public void setLibiresearch(LogLevel libiresearch) { + this.libiresearch = libiresearch; + } + + public LogLevel getLicense() { + return license; + } + + public void setLicense(LogLevel license) { + this.license = license; + } + + public LogLevel getDeprecation() { + return deprecation; + } + + public void setDeprecation(LogLevel deprecation) { + this.deprecation = deprecation; + } + + public LogLevel getRocksdb() { 
+ return rocksdb; + } + + public void setRocksdb(LogLevel rocksdb) { + this.rocksdb = rocksdb; + } + + public LogLevel getRequests() { + return requests; + } + + public void setRequests(LogLevel requests) { + this.requests = requests; + } + + public LogLevel getRepWal() { + return repWal; + } + + public void setRepWal(LogLevel repWal) { + this.repWal = repWal; + } + + public LogLevel getArangosearch() { + return arangosearch; + } + + public void setArangosearch(LogLevel arangosearch) { + this.arangosearch = arangosearch; + } + + public LogLevel getViews() { + return views; + } + + public void setViews(LogLevel views) { + this.views = views; + } + + public LogLevel getRepState() { + return repState; + } + + public void setRepState(LogLevel repState) { + this.repState = repState; + } + + public LogLevel getAuthorization() { + return authorization; + } + + public void setAuthorization(LogLevel authorization) { + this.authorization = authorization; + } + + public LogLevel getQueries() { + return queries; + } + + public void setQueries(LogLevel queries) { + this.queries = queries; + } + + public LogLevel getAql() { + return aql; + } + + public void setAql(LogLevel aql) { + this.aql = aql; + } + + public LogLevel getGraphs() { + return graphs; + } + + public void setGraphs(LogLevel graphs) { + this.graphs = graphs; + } + + public LogLevel getMaintenance() { + return maintenance; + } + + public void setMaintenance(LogLevel maintenance) { + this.maintenance = maintenance; + } + + public LogLevel getDevelopment() { + return development; + } + + public void setDevelopment(LogLevel development) { + this.development = development; + } + + public LogLevel getReplication() { + return replication; + } + + public void setReplication(LogLevel replication) { + this.replication = replication; + } + + public LogLevel getHttpclient() { + return httpclient; + } + + public void setHttpclient(LogLevel httpclient) { + this.httpclient = httpclient; + } + + public LogLevel getHeartbeat() { + 
return heartbeat; + } + + public void setHeartbeat(LogLevel heartbeat) { + this.heartbeat = heartbeat; + } + + public LogLevel getFlush() { + return flush; + } + + public void setFlush(LogLevel flush) { + this.flush = flush; + } + + public LogLevel getGeneral() { + return general; + } + + public void setGeneral(LogLevel general) { + this.general = general; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof LogLevelEntity)) return false; + LogLevelEntity that = (LogLevelEntity) o; + return all == that.all && agency == that.agency && agencycomm == that.agencycomm && agencystore == that.agencystore && backup == that.backup && bench == that.bench && cluster == that.cluster && communication == that.communication && authentication == that.authentication && config == that.config && crash == that.crash && dump == that.dump && engines == that.engines && cache == that.cache && security == that.security && startup == that.startup && trx == that.trx && supervision == that.supervision && threads == that.threads && ttl == that.ttl && ssl == that.ssl && replication2 == that.replication2 && restore == that.restore && memory == that.memory && validation == that.validation && statistics == that.statistics && v8 == that.v8 && syscall == that.syscall && libiresearch == that.libiresearch && license == that.license && deprecation == that.deprecation && rocksdb == that.rocksdb && requests == that.requests && repWal == that.repWal && arangosearch == that.arangosearch && views == that.views && repState == that.repState && authorization == that.authorization && queries == that.queries && aql == that.aql && graphs == that.graphs && maintenance == that.maintenance && development == that.development && replication == that.replication && httpclient == that.httpclient && heartbeat == that.heartbeat && flush == that.flush && general == that.general; + } + + @Override + public int hashCode() { + return Objects.hash(all, agency, agencycomm, agencystore, backup, bench, 
cluster, communication, authentication, config, crash, dump, engines, cache, security, startup, trx, supervision, threads, ttl, ssl, replication2, restore, memory, validation, statistics, v8, syscall, libiresearch, license, deprecation, rocksdb, requests, repWal, arangosearch, views, repState, authorization, queries, aql, graphs, maintenance, development, replication, httpclient, heartbeat, flush, general); + } + + public enum LogLevel { + FATAL, ERROR, WARNING, INFO, DEBUG, TRACE, DEFAULT + } + +} diff --git a/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java b/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java new file mode 100644 index 000000000..14c899702 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java @@ -0,0 +1,97 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class MultiDocumentEntity { + + private List documents = new ArrayList<>(); + private List errors = new ArrayList<>(); + private List documentsAndErrors = new ArrayList<>(); + private boolean isPotentialDirtyRead = false; + + public MultiDocumentEntity() { + super(); + } + + /** + * @return all successfully processed documents + */ + public List getDocuments() { + return documents; + } + + public void setDocuments(final List documents) { + this.documents = documents; + } + + /** + * @return all errors + */ + public List getErrors() { + return errors; + } + + public void setErrors(final List errors) { + this.errors = errors; + } + + /** + * @return all successfully processed documents and all errors in the same order they are processed + */ + public List getDocumentsAndErrors() { + return documentsAndErrors; + } + + public void setDocumentsAndErrors(final List documentsAndErrors) { + this.documentsAndErrors = documentsAndErrors; + } + + /** + * @return true if the result is a potential dirty read + * @since ArangoDB 3.10 + */ + public Boolean isPotentialDirtyRead() { + return isPotentialDirtyRead; + } + + public void setPotentialDirtyRead(final Boolean isPotentialDirtyRead) { + this.isPotentialDirtyRead = isPotentialDirtyRead; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof MultiDocumentEntity)) return false; + MultiDocumentEntity that = (MultiDocumentEntity) o; + return isPotentialDirtyRead == that.isPotentialDirtyRead && Objects.equals(documents, that.documents) && Objects.equals(errors, that.errors) && Objects.equals(documentsAndErrors, that.documentsAndErrors); + } + + @Override + public int hashCode() { + return Objects.hash(documents, errors, documentsAndErrors, isPotentialDirtyRead); + } +} diff --git 
a/src/main/java/com/arangodb/entity/Permissions.java b/core/src/main/java/com/arangodb/entity/Permissions.java similarity index 76% rename from src/main/java/com/arangodb/entity/Permissions.java rename to core/src/main/java/com/arangodb/entity/Permissions.java index 81cdf7c89..73f8b8105 100644 --- a/src/main/java/com/arangodb/entity/Permissions.java +++ b/core/src/main/java/com/arangodb/entity/Permissions.java @@ -1,39 +1,45 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum Permissions { - - /** - * read and write access - */ - RW, - /** - * read-only access - */ - RO, - NONE; - -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Mark Vollmary + */ +public enum Permissions { + + /** + * read and write access + */ + @JsonProperty("rw") + RW, + + /** + * read-only access + */ + @JsonProperty("ro") + RO, + + @JsonProperty("none") + NONE + +} diff --git a/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java b/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java new file mode 100644 index 000000000..83758e581 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java @@ -0,0 +1,81 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class QueryCachePropertiesEntity { + + private CacheMode mode; + private Long maxResults; + + public QueryCachePropertiesEntity() { + super(); + } + + /** + * @return the mode the AQL query cache operates in. The mode is one of the following values: off, on or demand + */ + public CacheMode getMode() { + return mode; + } + + /** + * @param mode the mode the AQL query cache operates in. 
The mode is one of the following values: off, on or demand + */ + public void setMode(final CacheMode mode) { + this.mode = mode; + } + + /** + * @return the maximum number of query results that will be stored per database-specific cache + */ + public Long getMaxResults() { + return maxResults; + } + + /** + * @param maxResults the maximum number of query results that will be stored per database-specific cache + */ + public void setMaxResults(final Long maxResults) { + this.maxResults = maxResults; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryCachePropertiesEntity)) return false; + QueryCachePropertiesEntity that = (QueryCachePropertiesEntity) o; + return mode == that.mode && Objects.equals(maxResults, that.maxResults); + } + + @Override + public int hashCode() { + return Objects.hash(mode, maxResults); + } + + public enum CacheMode { + off, on, demand + } + +} diff --git a/core/src/main/java/com/arangodb/entity/QueryEntity.java b/core/src/main/java/com/arangodb/entity/QueryEntity.java new file mode 100644 index 000000000..9518f5fe4 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/QueryEntity.java @@ -0,0 +1,124 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Date; +import java.util.Map; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class QueryEntity { + + private String id; + private String database; + private String user; + private String query; + private Map bindVars; + private Date started; + private Double runTime; + private Long peakMemoryUsage; + private QueryExecutionState state; + private Boolean stream; + + /** + * @return the query's id + */ + public String getId() { + return id; + } + + /** + * @return the name of the database the query runs in + */ + public String getDatabase() { + return database; + } + + /** + * @return the name of the user that started the query + */ + public String getUser() { + return user; + } + + /** + * @return the query string (potentially truncated) + */ + public String getQuery() { + return query; + } + + /** + * @return the bind parameter values used by the query + */ + public Map getBindVars() { + return bindVars; + } + + /** + * @return the date and time when the query was started + */ + public Date getStarted() { + return started; + } + + /** + * @return the query's run time up to the point the list of queries was queried + */ + public Double getRunTime() { + return runTime; + } + + /** + * @return the query’s peak memory usage in bytes (in increments of 32KB) + */ + public Long getPeakMemoryUsage() { + return peakMemoryUsage; + } + + /** + * @return the query's current execution state + */ + public QueryExecutionState getState() { + return state; + } + + /** + * @return whether or not the query uses a streaming cursor + */ + public Boolean getStream() { + return stream; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryEntity)) return false; + QueryEntity that = (QueryEntity) o; + return Objects.equals(id, that.id) && Objects.equals(database, that.database) && Objects.equals(user, that.user) && 
Objects.equals(query, that.query) && Objects.equals(bindVars, that.bindVars) && Objects.equals(started, that.started) && Objects.equals(runTime, that.runTime) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && state == that.state && Objects.equals(stream, that.stream); + } + + @Override + public int hashCode() { + return Objects.hash(id, database, user, query, bindVars, started, runTime, peakMemoryUsage, state, stream); + } +} diff --git a/core/src/main/java/com/arangodb/entity/QueryExecutionState.java b/core/src/main/java/com/arangodb/entity/QueryExecutionState.java new file mode 100644 index 000000000..f42a584ca --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/QueryExecutionState.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Mark Vollmary + */ +public enum QueryExecutionState { + @JsonProperty("initializing") + INITIALIZING, + + @JsonProperty("parsing") + PARSING, + + @JsonProperty("optimizing ast") + OPTIMIZING_AST, + + @JsonProperty("loading collections") + LOADING_COLLECTIONS, + + @JsonProperty("instantiating plan") + INSTANTIATING_PLAN, + + @JsonProperty("instantiating executors") + INSTANTIATING_EXECUTORS, + + @JsonProperty("optimizing plan") + OPTIMIZING_PLAN, + + @JsonProperty("executing") + EXECUTING, + + @JsonProperty("finalizing") + FINALIZING, + + @JsonProperty("finished") + FINISHED, + + @JsonProperty("killed") + KILLED, + + @JsonProperty("invalid") + INVALID +} diff --git a/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java b/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java new file mode 100644 index 000000000..ef7068af6 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java @@ -0,0 +1,76 @@ +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @since ArangoDB 3.10 + */ +public final class QueryOptimizerRule { + private String name; + private Flags flags; + + public String getName() { + return name; + } + + public Flags getFlags() { + return flags; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryOptimizerRule)) return false; + QueryOptimizerRule that = (QueryOptimizerRule) o; + return Objects.equals(name, that.name) && Objects.equals(flags, that.flags); + } + + @Override + public int hashCode() { + return Objects.hash(name, flags); + } + + public static class Flags { + private Boolean hidden; + private Boolean clusterOnly; + private Boolean canBeDisabled; + private Boolean canCreateAdditionalPlans; + private Boolean disabledByDefault; + private Boolean enterpriseOnly; + + public Boolean getHidden() { + 
return hidden; + } + + public Boolean getClusterOnly() { + return clusterOnly; + } + + public Boolean getCanBeDisabled() { + return canBeDisabled; + } + + public Boolean getCanCreateAdditionalPlans() { + return canCreateAdditionalPlans; + } + + public Boolean getDisabledByDefault() { + return disabledByDefault; + } + + public Boolean getEnterpriseOnly() { + return enterpriseOnly; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Flags)) return false; + Flags flags = (Flags) o; + return Objects.equals(hidden, flags.hidden) && Objects.equals(clusterOnly, flags.clusterOnly) && Objects.equals(canBeDisabled, flags.canBeDisabled) && Objects.equals(canCreateAdditionalPlans, flags.canCreateAdditionalPlans) && Objects.equals(disabledByDefault, flags.disabledByDefault) && Objects.equals(enterpriseOnly, flags.enterpriseOnly); + } + + @Override + public int hashCode() { + return Objects.hash(hidden, clusterOnly, canBeDisabled, canCreateAdditionalPlans, disabledByDefault, enterpriseOnly); + } + } +} diff --git a/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java new file mode 100644 index 000000000..52378b408 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java @@ -0,0 +1,145 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class QueryTrackingPropertiesEntity { + + private Boolean enabled; + private Boolean trackSlowQueries; + private Long maxSlowQueries; + private Long slowQueryThreshold; + private Long maxQueryStringLength; + + public QueryTrackingPropertiesEntity() { + super(); + } + + /** + * @return If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be + * tracked + */ + public Boolean getEnabled() { + return enabled; + } + + /** + * @param enabled If set to true, then queries will be tracked. If set to false, neither queries nor slow queries + * will + * be tracked + */ + public void setEnabled(final Boolean enabled) { + this.enabled = enabled; + } + + /** + * @return If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds + * the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must + * also be set to true. + */ + public Boolean getTrackSlowQueries() { + return trackSlowQueries; + } + + /** + * @param trackSlowQueries If set to true, then slow queries will be tracked in the list of slow queries if their + * runtime exceeds + * the value set in slowQueryThreshold. In order for slow queries to be tracked, the + * enabled property + * must also be set to true. + */ + public void setTrackSlowQueries(final Boolean trackSlowQueries) { + this.trackSlowQueries = trackSlowQueries; + } + + /** + * @return The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is + * full, the oldest entry in it will be discarded when additional slow queries occur. + */ + public Long getMaxSlowQueries() { + return maxSlowQueries; + } + + /** + * @param maxSlowQueries The maximum number of slow queries to keep in the list of slow queries. 
If the list of + * slow queries is + * full, the oldest entry in it will be discarded when additional slow queries occur. + */ + public void setMaxSlowQueries(final Long maxSlowQueries) { + this.maxSlowQueries = maxSlowQueries; + } + + /** + * @return The threshold value for treating a query as slow. A query with a runtime greater or equal to this + * threshold value will be put into the list of slow queries when slow query tracking is enabled. The value + * for slowQueryThreshold is specified in seconds. + */ + public Long getSlowQueryThreshold() { + return slowQueryThreshold; + } + + /** + * @param slowQueryThreshold The threshold value for treating a query as slow. A query with a runtime greater or + * equal to this + * threshold value will be put into the list of slow queries when slow query tracking + * is enabled. The + * value for slowQueryThreshold is specified in seconds. + */ + public void setSlowQueryThreshold(final Long slowQueryThreshold) { + this.slowQueryThreshold = slowQueryThreshold; + } + + /** + * @return The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, + * and this property can be used to save memory in case very long query strings are used. The value is + * specified in bytes. + */ + public Long getMaxQueryStringLength() { + return maxQueryStringLength; + } + + /** + * @param maxQueryStringLength The maximum query string length to keep in the list of queries. Query strings can + * have arbitrary + * lengths, and this property can be used to save memory in case very long query + * strings are used. The + * value is specified in bytes. 
+ */ + public void setMaxQueryStringLength(final Long maxQueryStringLength) { + this.maxQueryStringLength = maxQueryStringLength; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryTrackingPropertiesEntity)) return false; + QueryTrackingPropertiesEntity that = (QueryTrackingPropertiesEntity) o; + return Objects.equals(enabled, that.enabled) && Objects.equals(trackSlowQueries, that.trackSlowQueries) && Objects.equals(maxSlowQueries, that.maxSlowQueries) && Objects.equals(slowQueryThreshold, that.slowQueryThreshold) && Objects.equals(maxQueryStringLength, that.maxQueryStringLength); + } + + @Override + public int hashCode() { + return Objects.hash(enabled, trackSlowQueries, maxSlowQueries, slowQueryThreshold, maxQueryStringLength); + } +} diff --git a/core/src/main/java/com/arangodb/entity/ReplicationFactor.java b/core/src/main/java/com/arangodb/entity/ReplicationFactor.java new file mode 100644 index 000000000..60e56e4af --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ReplicationFactor.java @@ -0,0 +1,60 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonValue; + +public interface ReplicationFactor { + + static NumericReplicationFactor of(int value) { + return new NumericReplicationFactor(value); + } + + static SatelliteReplicationFactor ofSatellite() { + return SatelliteReplicationFactor.INSTANCE; + } + + @JsonValue + Object get(); + + enum SatelliteReplicationFactor implements ReplicationFactor { + INSTANCE; + + @Override + public String get() { + return "satellite"; + } + } + + final class NumericReplicationFactor implements ReplicationFactor { + + private final Integer value; + + public NumericReplicationFactor(Integer value) { + this.value = value; + } + + @Override + public Integer get() { + return value; + } + } +} diff --git a/src/main/java/com/arangodb/entity/ServerMode.java b/core/src/main/java/com/arangodb/entity/ServerMode.java similarity index 93% rename from src/main/java/com/arangodb/entity/ServerMode.java rename to core/src/main/java/com/arangodb/entity/ServerMode.java index 2072cb064..4cb13610d 100644 --- a/src/main/java/com/arangodb/entity/ServerMode.java +++ b/core/src/main/java/com/arangodb/entity/ServerMode.java @@ -1,29 +1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum ServerMode { - DEFAULT, RESILIENT -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum ServerMode { + DEFAULT, RESILIENT +} diff --git a/src/main/java/com/arangodb/entity/ServerRole.java b/core/src/main/java/com/arangodb/entity/ServerRole.java similarity index 89% rename from src/main/java/com/arangodb/entity/ServerRole.java rename to core/src/main/java/com/arangodb/entity/ServerRole.java index 5976e1107..a897651ff 100644 --- a/src/main/java/com/arangodb/entity/ServerRole.java +++ b/core/src/main/java/com/arangodb/entity/ServerRole.java @@ -1,29 +1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum ServerRole { - SINGLE, AGENT, COORDINATOR, PRIMARY, SECONDARY, UNDEFINED -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public enum ServerRole { + SINGLE, AGENT, COORDINATOR, PRIMARY, SECONDARY, UNDEFINED +} \ No newline at end of file diff --git a/core/src/main/java/com/arangodb/entity/ShardEntity.java b/core/src/main/java/com/arangodb/entity/ShardEntity.java new file mode 100644 index 000000000..8ba767816 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ShardEntity.java @@ -0,0 +1,51 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class ShardEntity { + + private String shardId; + + public ShardEntity() { + super(); + } + + public String getShardId() { + return shardId; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ShardEntity)) return false; + ShardEntity that = (ShardEntity) o; + return Objects.equals(shardId, that.shardId); + } + + @Override + public int hashCode() { + return Objects.hashCode(shardId); + } +} diff --git a/core/src/main/java/com/arangodb/entity/ShardingStrategy.java b/core/src/main/java/com/arangodb/entity/ShardingStrategy.java new file mode 100644 index 000000000..384e51554 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ShardingStrategy.java @@ -0,0 +1,45 @@ +/* + * DISCLAIMER + * + * Copyright 2019 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Axel Becker + */ +public enum ShardingStrategy { + + COMMUNITY_COMPAT("community-compat"), + ENTERPRISE_COMPAT("enterprise-compat"), + ENTERPRISE_SMART_EDGE_COMPAT("enterprise-smart-edge-compat"), + HASH("hash"), + ENTERPRISE_HASH_SMART_EDGE("enterprise-hash-smart-edge"), + ENTERPRISE_HEX_SMART_VERTEX("enterprise-hex-smart-vertex"); + + private final String internalName; + + ShardingStrategy(String internalName) { + this.internalName = internalName; + } + + public String getInternalName() { + return this.internalName; + } + +} diff --git a/src/main/java/com/arangodb/entity/DocumentCreateEntity.java b/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java similarity index 52% rename from src/main/java/com/arangodb/entity/DocumentCreateEntity.java rename to core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java index b4447fbd2..674babd71 100644 --- a/src/main/java/com/arangodb/entity/DocumentCreateEntity.java +++ b/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java @@ -1,51 +1,53 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.velocypack.annotations.Expose; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentCreateEntity extends DocumentEntity { - - @Expose(deserialize = false) - private T newDocument; - - public DocumentCreateEntity() { - super(); - } - - /** - * @return If the query parameter returnNew is true, then the complete new document is returned. - */ - public T getNew() { - return newDocument; - } - - public void setNew(final T newDocument) { - this.newDocument = newDocument; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.5.0 + */ +public final class StreamTransactionEntity { + + private String id; + private StreamTransactionStatus status; + + public String getId() { + return id; + } + + public StreamTransactionStatus getStatus() { + return status; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof StreamTransactionEntity)) return false; + StreamTransactionEntity that = (StreamTransactionEntity) o; + return Objects.equals(id, that.id) && status == that.status; + } + + @Override + public int hashCode() { + return Objects.hash(id, status); + } +} diff --git a/src/main/java/com/arangodb/entity/IndexType.java b/core/src/main/java/com/arangodb/entity/StreamTransactionStatus.java similarity index 82% rename from src/main/java/com/arangodb/entity/IndexType.java rename to core/src/main/java/com/arangodb/entity/StreamTransactionStatus.java index b08f0a3d5..f03de5da5 100644 --- a/src/main/java/com/arangodb/entity/IndexType.java +++ b/core/src/main/java/com/arangodb/entity/StreamTransactionStatus.java @@ -1,29 +1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum IndexType { - primary, hash, skiplist, persistent, geo, geo1, geo2, fulltext, edge -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Michele Rastelli + */ +public enum StreamTransactionStatus { + running, committed, aborted +} diff --git a/core/src/main/java/com/arangodb/entity/TransactionEntity.java b/core/src/main/java/com/arangodb/entity/TransactionEntity.java new file mode 100644 index 000000000..043c22819 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/TransactionEntity.java @@ -0,0 +1,53 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.5.0 + */ +public final class TransactionEntity { + + private String id; + private StreamTransactionStatus state; + + public String getId() { + return id; + } + + public StreamTransactionStatus getState() { + return state; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof TransactionEntity)) return false; + TransactionEntity that = (TransactionEntity) o; + return Objects.equals(id, that.id) && state == that.state; + } + + @Override + public int hashCode() { + return Objects.hash(id, state); + } +} diff --git a/core/src/main/java/com/arangodb/entity/UserEntity.java b/core/src/main/java/com/arangodb/entity/UserEntity.java new file mode 100644 index 000000000..64b213439 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/UserEntity.java @@ -0,0 +1,72 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Map; +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class UserEntity { + + private String user; + private Boolean active; + private Map extra; + private Boolean changePassword; + + /** + * @return The name of the user as a string + */ + public String getUser() { + return user; + } + + /** + * @return An flag that specifies whether the user is active + */ + public Boolean getActive() { + return active; + } + + /** + * @return An object with arbitrary extra data about the user + */ + public Map getExtra() { + return extra; + } + + public Boolean getChangePassword() { + return changePassword; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof UserEntity)) return false; + UserEntity that = (UserEntity) o; + return Objects.equals(user, that.user) && Objects.equals(active, that.active) && Objects.equals(extra, that.extra) && Objects.equals(changePassword, that.changePassword); + } + + @Override + public int hashCode() { + return Objects.hash(user, active, extra, changePassword); + } +} diff --git a/src/main/java/com/arangodb/entity/VertexEntity.java b/core/src/main/java/com/arangodb/entity/VertexEntity.java similarity index 85% rename from src/main/java/com/arangodb/entity/VertexEntity.java rename to core/src/main/java/com/arangodb/entity/VertexEntity.java index 77e24983f..4dfb6a55a 100644 --- a/src/main/java/com/arangodb/entity/VertexEntity.java +++ b/core/src/main/java/com/arangodb/entity/VertexEntity.java @@ -1,33 +1,32 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public class VertexEntity extends DocumentEntity { - - public VertexEntity() { - super(); - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +/** + * @author Mark Vollmary + */ +public final class VertexEntity extends DocumentEntity { + + public VertexEntity() { + super(); + } + +} diff --git a/src/main/java/com/arangodb/entity/TraversalEntity.java b/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java similarity index 52% rename from src/main/java/com/arangodb/entity/TraversalEntity.java rename to core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java index 4571e94bd..22c2f0853 100644 --- a/src/main/java/com/arangodb/entity/TraversalEntity.java +++ b/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java @@ -1,56 +1,55 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class TraversalEntity { - - private Collection vertices; - private Collection> paths; - - public TraversalEntity() { - super(); - } - - public Collection getVertices() { - return vertices; - } - - public void setVertices(final Collection vertices) { - this.vertices = vertices; - } - - public Collection> getPaths() { - return paths; - } - - public void setPaths(final Collection> paths) { - this.paths = paths; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public final class VertexUpdateEntity extends DocumentEntity { + + @JsonProperty("_oldRev") + private String oldRev; + + public VertexUpdateEntity() { + super(); + } + + public String getOldRev() { + return oldRev; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof VertexUpdateEntity)) return false; + if (!super.equals(o)) return false; + VertexUpdateEntity that = (VertexUpdateEntity) o; + return Objects.equals(oldRev, that.oldRev); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldRev); + } +} diff --git a/core/src/main/java/com/arangodb/entity/ViewEntity.java b/core/src/main/java/com/arangodb/entity/ViewEntity.java new file mode 100644 index 000000000..94c7e0a6d --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/ViewEntity.java @@ -0,0 +1,68 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import java.util.Objects; + +/** + * @author Mark Vollmary + */ +public class ViewEntity { + + private String id; + private String name; + private ViewType type; + + public ViewEntity() { + super(); + } + + public ViewEntity(final String id, final String name, final ViewType type) { + super(); + this.id = id; + this.name = name; + this.type = type; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + + public ViewType getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ViewEntity)) return false; + ViewEntity that = (ViewEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && type == that.type; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, type); + } +} diff --git a/src/main/java/com/arangodb/entity/QueryExecutionState.java b/core/src/main/java/com/arangodb/entity/ViewType.java similarity index 77% rename from src/main/java/com/arangodb/entity/QueryExecutionState.java rename to core/src/main/java/com/arangodb/entity/ViewType.java index 628ba0301..063553cd5 100644 --- a/src/main/java/com/arangodb/entity/QueryExecutionState.java +++ b/core/src/main/java/com/arangodb/entity/ViewType.java @@ -1,38 +1,35 @@ -/* - * DISCLAIMER - * - * Copyright 2018 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public enum QueryExecutionState { - INITIALIZING, - PARSING, - OPTIMIZING_AST, - LOADING_COLLECTIONS, - INSTANTIATING_PLAN, - OPTIMIZING_PLAN, - EXECUTING, - FINALIZING, - FINISHED, - INVALID -} +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Mark Vollmary + */ +public enum ViewType { + + @JsonProperty("arangosearch") + ARANGO_SEARCH, + @JsonProperty("search-alias") + SEARCH_ALIAS + +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerFeature.java b/core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerFeature.java new file mode 100644 index 000000000..e0ed2d54f --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerFeature.java @@ -0,0 +1,50 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch; + +/** + * @author Michele Rastelli + */ +public enum AnalyzerFeature { + + /** + * how often a term is seen, required for PHRASE() + */ + frequency, + + /** + * the field normalization factor + */ + norm, + + /** + * sequentially increasing term position, required for PHRASE(). If present then the frequency feature is also + * required. + */ + position, + + /** + * enable search highlighting capabilities (Enterprise Edition only). If present, then the `position` and `frequency` features are also required. + * @since ArangoDB 3.10 + */ + offset + +} diff --git a/src/main/java/com/arangodb/entity/VertexUpdateEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerType.java similarity index 65% rename from src/main/java/com/arangodb/entity/VertexUpdateEntity.java rename to core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerType.java index 4d722af97..743c1e8be 100644 --- a/src/main/java/com/arangodb/entity/VertexUpdateEntity.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/AnalyzerType.java @@ -1,42 +1,46 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.velocypack.annotations.SerializedName; - -/** - * @author Mark Vollmary - * - */ -public class VertexUpdateEntity extends DocumentEntity { - - @SerializedName("_oldRev") - private String oldRev; - - public VertexUpdateEntity() { - super(); - } - - public String getOldRev() { - return oldRev; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch; + +/** + * @author Michele Rastelli + */ +public enum AnalyzerType { + identity, + delimiter, + multi_delimiter, + stem, + norm, + ngram, + text, + pipeline, + stopwords, + aql, + geojson, + geopoint, + geo_s2, + segmentation, + collation, + classification, + nearest_neighbors, + minhash, + wildcard +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchCompression.java b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchCompression.java new file mode 100644 index 000000000..6bc05ee4c --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchCompression.java @@ -0,0 +1,50 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch; + + +/** + * @author Michele Rastelli + * @since ArangoDB 3.7 + */ +public enum ArangoSearchCompression { + + /** + * (default): use LZ4 fast compression. + */ + lz4("lz4"), + + /** + * disable compression to trade space for speed. 
+ */ + none("none"); + + private final String value; + + ArangoSearchCompression(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java new file mode 100644 index 000000000..fa729e1e0 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java @@ -0,0 +1,153 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.internal.serde.InternalDeserializers; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; + +import java.util.Collection; +import java.util.Objects; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class ArangoSearchPropertiesEntity extends ViewEntity { + + private Long consolidationIntervalMsec; + private Long commitIntervalMsec; + private Long cleanupIntervalStep; + private ConsolidationPolicy consolidationPolicy; + private Collection primarySort; + private Collection links; + private ArangoSearchCompression primarySortCompression; + private Collection storedValues; + private Collection optimizeTopK; + private Boolean primarySortCache; + private Boolean primaryKeyCache; + + /** + * @return Wait at least this many milliseconds between committing view data store changes and making documents + * visible to queries (default: 1000, to disable use: 0). For the case where there are a lot of inserts/updates, a + * lower value, until commit, will cause the index not to account for them and memory usage would continue to grow. + * For the case where there are a few inserts/updates, a higher value will impact performance and waste disk space + * for each commit call without any added benefits. Background: For data retrieval ArangoSearch views follow the + * concept of β€œeventually-consistent”, i.e. eventually all the data in ArangoDB will be matched by corresponding + * query expressions. The concept of ArangoSearch view β€œcommit” operation is introduced to control the upper-bound + * on the time until document addition/removals are actually reflected by corresponding query expressions. 
Once a + * β€œcommit” operation is complete all documents added/removed prior to the start of the β€œcommit” operation will be + * reflected by queries invoked in subsequent ArangoDB transactions, in-progress ArangoDB transactions will still + * continue to return a repeatable-read state. + */ + public Long getCommitIntervalMsec() { + return commitIntervalMsec; + } + + /** + * @return Wait at least this many milliseconds between committing index data changes and making them visible to + * queries (default: 60000, to disable use: 0). For the case where there are a lot of inserts/updates, a + * lower value, until commit, will cause the index not to account for them and memory usage would continue + * to grow. For the case where there are a few inserts/updates, a higher value will impact performance and + * waste disk space for each commit call without any added benefits. + */ + public Long getConsolidationIntervalMsec() { + return consolidationIntervalMsec; + } + + /** + * @return Wait at least this many commits between removing unused files in data directory (default: 10, to disable + * use: 0). For the case where the consolidation policies merge segments often (i.e. a lot of + * commit+consolidate), a lower value will cause a lot of disk space to be wasted. For the case where the + * consolidation policies rarely merge segments (i.e. few inserts/deletes), a higher value will impact + * performance without any added benefits. 
+ */ + public Long getCleanupIntervalStep() { + return cleanupIntervalStep; + } + + public ConsolidationPolicy getConsolidationPolicy() { + return consolidationPolicy; + } + + /** + * @return A list of linked collections + */ + @JsonDeserialize(using = InternalDeserializers.CollectionLinksDeserializer.class) + public Collection getLinks() { + return links; + } + + /** + * @return A list of primary sort objects + */ + public Collection getPrimarySort() { + return primarySort; + } + + /** + * @return Defines how to compress the primary sort data (introduced in v3.7.0). ArangoDB v3.5 and v3.6 always + * compress the index using LZ4. + * @since ArangoDB 3.7 + */ + public ArangoSearchCompression getPrimarySortCompression() { + return primarySortCompression; + } + + /** + * @return An array of objects to describe which document attributes to store in the View index. It can then cover + * search queries, which means the data can be taken from the index directly and accessing the storage engine can be + * avoided. + * @since ArangoDB 3.7 + */ + public Collection getStoredValues() { + return storedValues; + } + + /** + * @return An array of strings defining optimized sort expressions. 
+ * @since ArangoDB 3.11, Enterprise Edition only + */ + public Collection getOptimizeTopK() { + return optimizeTopK; + } + + public Boolean getPrimarySortCache() { + return primarySortCache; + } + + public Boolean getPrimaryKeyCache() { + return primaryKeyCache; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoSearchPropertiesEntity)) return false; + if (!super.equals(o)) return false; + ArangoSearchPropertiesEntity that = (ArangoSearchPropertiesEntity) o; + return Objects.equals(consolidationIntervalMsec, that.consolidationIntervalMsec) && Objects.equals(commitIntervalMsec, that.commitIntervalMsec) && Objects.equals(cleanupIntervalStep, that.cleanupIntervalStep) && Objects.equals(consolidationPolicy, that.consolidationPolicy) && Objects.equals(primarySort, that.primarySort) && Objects.equals(links, that.links) && primarySortCompression == that.primarySortCompression && Objects.equals(storedValues, that.storedValues) && Objects.equals(optimizeTopK, that.optimizeTopK) && Objects.equals(primarySortCache, that.primarySortCache) && Objects.equals(primaryKeyCache, that.primaryKeyCache); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), consolidationIntervalMsec, commitIntervalMsec, cleanupIntervalStep, consolidationPolicy, primarySort, links, primarySortCompression, storedValues, optimizeTopK, primarySortCache, primaryKeyCache); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java b/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java new file mode 100644 index 000000000..eb33ebb20 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java @@ -0,0 +1,203 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+import com.arangodb.internal.serde.InternalDeserializers;
+import com.arangodb.internal.serde.InternalSerializers;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class CollectionLink {
+
+    private final String name;
+    private Collection<String> analyzers;
+    private Boolean includeAllFields;
+    private Boolean trackListPositions;
+    private StoreValuesType storeValues;
+    private Collection<FieldLink> fields;
+    private Collection<FieldLink> nested;
+    private Boolean inBackground;
+    private Boolean cache;
+
+    private CollectionLink(final String name) {
+        super();
+        this.name = name;
+    }
+
+    /**
+     * Creates an instance of {@code CollectionLink} on the given collection name
+     *
+     * @param name Name of a collection
+     * @return new instance of {@code CollectionLink}
+     */
+    @JsonCreator
+    public static CollectionLink on(@JsonProperty("name") final String name) {
+        return new CollectionLink(name);
+    }
+
+    /**
+     * @param analyzers The list of analyzers to be used for indexing of string values (default: ["identity"]).
+     * @return link
+     */
+    public CollectionLink analyzers(final String... analyzers) {
+        this.analyzers = Arrays.asList(analyzers);
+        return this;
+    }
+
+    /**
+     * @param includeAllFields The flag determines whether or not to index all fields on a particular level of depth
+     *                         (default:
+     *                         false).
+     * @return link
+     */
+    public CollectionLink includeAllFields(final Boolean includeAllFields) {
+        this.includeAllFields = includeAllFields;
+        return this;
+    }
+
+    /**
+     * @param trackListPositions The flag determines whether or not values in a lists should be treated separate
+     *                           (default: false).
+     * @return link
+     */
+    public CollectionLink trackListPositions(final Boolean trackListPositions) {
+        this.trackListPositions = trackListPositions;
+        return this;
+    }
+
+    /**
+     * @param storeValues How should the view track the attribute values, this setting allows for additional value
+     *                    retrieval
+     *                    optimizations (default "none").
+     * @return link
+     */
+    public CollectionLink storeValues(final StoreValuesType storeValues) {
+        this.storeValues = storeValues;
+        return this;
+    }
+
+    /**
+     * @param fields A list of linked fields
+     * @return link
+     */
+    @JsonDeserialize(using = InternalDeserializers.FieldLinksDeserializer.class)
+    public CollectionLink fields(final FieldLink... fields) {
+        this.fields = Arrays.asList(fields);
+        return this;
+    }
+
+    /**
+     * @param nested A list of nested fields
+     * @return link
+     * @since ArangoDB 3.10
+     */
+    @JsonDeserialize(using = InternalDeserializers.FieldLinksDeserializer.class)
+    public CollectionLink nested(final FieldLink... nested) {
+        this.nested = Arrays.asList(nested);
+        return this;
+    }
+
+    /**
+     * @param inBackground If set to true, then no exclusive lock is used on the source collection during View index
+     *                     creation, so that it remains basically available. inBackground is an option that can be set
+     *                     when adding links. It does not get persisted as it is not a View property, but only a
+     *                     one-off option. (default: false)
+     * @return link
+     */
+    public CollectionLink inBackground(final Boolean inBackground) {
+        this.inBackground = inBackground;
+        return this;
+    }
+
+    /**
+     * @param cache If you enable this option, then field normalization values are always cached in memory. This can
+     *              improve the performance of scoring and ranking queries. Otherwise, these values are memory-mapped
+     *              and it is up to the operating system to load them from disk into memory and to evict them from
+     *              memory.
+     * @return link
+     * @since ArangoDB 3.9.5, Enterprise Edition only
+     */
+    public CollectionLink cache(final Boolean cache) {
+        this.cache = cache;
+        return this;
+    }
+
+    @JsonIgnore
+    public String getName() {
+        return name;
+    }
+
+    public Collection<String> getAnalyzers() {
+        return analyzers;
+    }
+
+    public Boolean getIncludeAllFields() {
+        return includeAllFields;
+    }
+
+    public Boolean getTrackListPositions() {
+        return trackListPositions;
+    }
+
+    public StoreValuesType getStoreValues() {
+        return storeValues;
+    }
+
+    @JsonSerialize(using = InternalSerializers.FieldLinksSerializer.class)
+    public Collection<FieldLink> getFields() {
+        return fields;
+    }
+
+    @JsonSerialize(using = InternalSerializers.FieldLinksSerializer.class)
+    public Collection<FieldLink> getNested() {
+        return nested;
+    }
+
+    public Boolean getInBackground() {
+        return inBackground;
+    }
+
+    public Boolean getCache() {
+        return cache;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof CollectionLink)) return false;
+        CollectionLink that = (CollectionLink) o;
+        return Objects.equals(name, that.name) && Objects.equals(analyzers, that.analyzers) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(trackListPositions, that.trackListPositions) && storeValues == that.storeValues && Objects.equals(fields, that.fields) && Objects.equals(nested, that.nested) && Objects.equals(inBackground, that.inBackground) && Objects.equals(cache, that.cache);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, analyzers, includeAllFields, trackListPositions, storeValues, fields, nested, inBackground, cache);
+    }
+}
\ No newline at end of file
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationPolicy.java b/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationPolicy.java
new file mode 100644
index 000000000..cfba95e74
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationPolicy.java
@@ -0,0 +1,144 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2018 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+import java.util.Objects;
+
+/**
+ * @author Mark Vollmary
+ */
+public final class ConsolidationPolicy {
+
+    private ConsolidationType type;
+    private Double threshold;
+    private Long segmentsMin;
+    private Long segmentsMax;
+    private Long segmentsBytesMax;
+    private Long segmentsBytesFloor;
+    private Long minScore;
+
+    public static ConsolidationPolicy of(final ConsolidationType type) {
+        return new ConsolidationPolicy().type(type);
+    }
+
+    public ConsolidationPolicy type(final ConsolidationType type) {
+        this.type = type;
+        return this;
+    }
+
+    /**
+     * @param threshold value in the range [0.0, 1.0]
+     * @return this
+     */
+    public ConsolidationPolicy threshold(final Double threshold) {
+        this.threshold = threshold;
+        return this;
+    }
+
+    public ConsolidationType getType() {
+        return type;
+    }
+
+    public Double getThreshold() {
+        return threshold;
+    }
+
+    public Long getSegmentsMin() {
+        return segmentsMin;
+    }
+
+    /**
+     * @param segmentsMin The minimum number of segments that will be evaluated as candidates for consolidation.
+     *                    (default: 1)
+     * @return this
+     */
+    public ConsolidationPolicy segmentsMin(final Long segmentsMin) {
+        this.segmentsMin = segmentsMin;
+        return this;
+    }
+
+    public Long getSegmentsMax() {
+        return segmentsMax;
+    }
+
+    /**
+     * @param segmentsMax The maximum number of segments that will be evaluated as candidates for consolidation.
+     *                    (default: 10)
+     * @return this
+     */
+    public ConsolidationPolicy segmentsMax(final Long segmentsMax) {
+        this.segmentsMax = segmentsMax;
+        return this;
+    }
+
+    public Long getSegmentsBytesMax() {
+        return segmentsBytesMax;
+    }
+
+    /**
+     * @param segmentsBytesMax Maximum allowed size of all consolidated segments in bytes. (default: 5368709120)
+     * @return this
+     */
+    public ConsolidationPolicy segmentsBytesMax(final Long segmentsBytesMax) {
+        this.segmentsBytesMax = segmentsBytesMax;
+        return this;
+    }
+
+    public Long getSegmentsBytesFloor() {
+        return segmentsBytesFloor;
+    }
+
+    /**
+     * @param segmentsBytesFloor Defines the value (in bytes) to treat all smaller segments as equal for consolidation
+     *                           selection. (default: 2097152)
+     * @return this
+     */
+    public ConsolidationPolicy segmentsBytesFloor(final Long segmentsBytesFloor) {
+        this.segmentsBytesFloor = segmentsBytesFloor;
+        return this;
+    }
+
+    public Long getMinScore() {
+        return minScore;
+    }
+
+    /**
+     * @param minScore Filter out consolidation candidates with a score less than this. (default: 0)
+     * @return this
+     */
+    public ConsolidationPolicy minScore(final Long minScore) {
+        this.minScore = minScore;
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ConsolidationPolicy that = (ConsolidationPolicy) o;
+        return type == that.type && Objects.equals(threshold, that.threshold) && Objects.equals(segmentsMin, that.segmentsMin) && Objects.equals(segmentsMax, that.segmentsMax) && Objects.equals(segmentsBytesMax, that.segmentsBytesMax) && Objects.equals(segmentsBytesFloor, that.segmentsBytesFloor) && Objects.equals(minScore, that.minScore);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(type, threshold, segmentsMin, segmentsMax, segmentsBytesMax, segmentsBytesFloor, minScore);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationType.java b/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationType.java
new file mode 100644
index 000000000..1eb3b1ebc
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/ConsolidationType.java
@@ -0,0 +1,18 @@
+package com.arangodb.entity.arangosearch;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+public enum ConsolidationType {
+
+    /**
+     * @deprecated The β€œbytes_accum” policy type is deprecated and remains in ArangoSearch for backwards compatibility
+     * with the older versions. Please make sure to always use the β€œtier” policy instead.
+     */
+    @Deprecated
+    @JsonProperty("bytes_accum")
+    BYTES_ACCUM,
+
+    @JsonProperty("tier")
+    TIER
+
+}
\ No newline at end of file
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java b/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java
new file mode 100644
index 000000000..1d2f0f8f8
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java
@@ -0,0 +1,180 @@
+package com.arangodb.entity.arangosearch;
+
+import com.arangodb.internal.serde.InternalDeserializers;
+import com.arangodb.internal.serde.InternalSerializers;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Objects;
+
+public final class FieldLink {
+
+    private final String name;
+    private Collection<String> analyzers;
+    private Boolean includeAllFields;
+    private Boolean trackListPositions;
+    private StoreValuesType storeValues;
+    private Collection<FieldLink> fields;
+    private Collection<FieldLink> nested;
+    private Boolean inBackground;
+    private Boolean cache;
+
+    private FieldLink(final String name) {
+        super();
+        this.name = name;
+    }
+
+    /**
+     * Creates an instance of {@code FieldLink} on the given field name
+     *
+     * @param name Name of a field
+     * @return new instance of {@code FieldLink}
+     */
+    @JsonCreator
+    public static FieldLink on(@JsonProperty("name") final String name) {
+        return new FieldLink(name);
+    }
+
+    /**
+     * @param analyzers The list of analyzers to be used for indexing of string values (default: ["identity"]).
+     * @return link
+     */
+    public FieldLink analyzers(final String... analyzers) {
+        this.analyzers = Arrays.asList(analyzers);
+        return this;
+    }
+
+    /**
+     * @param includeAllFields The flag determines whether or not to index all fields on a particular level of depth
+     *                         (default:
+     *                         false).
+     * @return link
+     */
+    public FieldLink includeAllFields(final Boolean includeAllFields) {
+        this.includeAllFields = includeAllFields;
+        return this;
+    }
+
+    /**
+     * @param trackListPositions The flag determines whether or not values in a lists should be treated separate
+     *                           (default: false).
+     * @return link
+     */
+    public FieldLink trackListPositions(final Boolean trackListPositions) {
+        this.trackListPositions = trackListPositions;
+        return this;
+    }
+
+    /**
+     * @param storeValues How should the view track the attribute values, this setting allows for additional value
+     *                    retrieval
+     *                    optimizations (default "none").
+     * @return link
+     */
+    public FieldLink storeValues(final StoreValuesType storeValues) {
+        this.storeValues = storeValues;
+        return this;
+    }
+
+    /**
+     * @param fields A list of linked fields
+     * @return link
+     */
+    @JsonDeserialize(using = InternalDeserializers.FieldLinksDeserializer.class)
+    public FieldLink fields(final FieldLink... fields) {
+        this.fields = Arrays.asList(fields);
+        return this;
+    }
+
+    /**
+     * @param nested A list of nested fields
+     * @return link
+     * @since ArangoDB 3.10
+     */
+    @JsonDeserialize(using = InternalDeserializers.FieldLinksDeserializer.class)
+    public FieldLink nested(final FieldLink... nested) {
+        this.nested = Arrays.asList(nested);
+        return this;
+    }
+
+    /**
+     * @param inBackground If set to true, then no exclusive lock is used on the source collection during View index
+     *                     creation, so that it remains basically available. inBackground is an option that can be set
+     *                     when adding links. It does not get persisted as it is not a View property, but only a
+     *                     one-off option. (default: false)
+     * @return link
+     */
+    public FieldLink inBackground(final Boolean inBackground) {
+        this.inBackground = inBackground;
+        return this;
+    }
+
+    /**
+     * @param cache If you enable this option, then field normalization values are always cached in memory. This can
+     *              improve the performance of scoring and ranking queries. Otherwise, these values are memory-mapped
+     *              and it is up to the operating system to load them from disk into memory and to evict them from
+     *              memory.
+     * @return link
+     * @since ArangoDB 3.9.5, Enterprise Edition only
+     */
+    public FieldLink cache(final Boolean cache) {
+        this.cache = cache;
+        return this;
+    }
+
+    @JsonIgnore
+    public String getName() {
+        return name;
+    }
+
+    public Collection<String> getAnalyzers() {
+        return analyzers;
+    }
+
+    public Boolean getIncludeAllFields() {
+        return includeAllFields;
+    }
+
+    public Boolean getTrackListPositions() {
+        return trackListPositions;
+    }
+
+    public StoreValuesType getStoreValues() {
+        return storeValues;
+    }
+
+    @JsonSerialize(using = InternalSerializers.FieldLinksSerializer.class)
+    public Collection<FieldLink> getFields() {
+        return fields;
+    }
+
+    @JsonSerialize(using = InternalSerializers.FieldLinksSerializer.class)
+    public Collection<FieldLink> getNested() {
+        return nested;
+    }
+
+    public Boolean getInBackground() {
+        return inBackground;
+    }
+
+    public Boolean getCache() {
+        return cache;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof FieldLink)) return false;
+        FieldLink fieldLink = (FieldLink) o;
+        return Objects.equals(name, fieldLink.name) && Objects.equals(analyzers, fieldLink.analyzers) && Objects.equals(includeAllFields, fieldLink.includeAllFields) && Objects.equals(trackListPositions, fieldLink.trackListPositions) && storeValues == fieldLink.storeValues && Objects.equals(fields, fieldLink.fields) && Objects.equals(nested, fieldLink.nested) && Objects.equals(inBackground, fieldLink.inBackground) && Objects.equals(cache, fieldLink.cache);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, analyzers, includeAllFields, trackListPositions, storeValues, fields, nested, inBackground, cache);
+    }
+}
\ No newline at end of file
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java b/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java
new file mode 100644
index 000000000..10b054108
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java
@@ -0,0 +1,103 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2019 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * @author Heiko Kernbach
+ */
+public final class PrimarySort {
+
+    private final String fieldName;
+    private Boolean ascending;
+
+    public PrimarySort(
+            @JsonProperty("field") String field,
+            @JsonProperty("asc") Boolean asc
+    ) {
+        this.fieldName = field;
+        this.ascending = asc;
+    }
+
+    private PrimarySort(final String fieldName) {
+        super();
+        this.fieldName = fieldName;
+    }
+
+    public static PrimarySort on(final String fieldName) {
+        return new PrimarySort(fieldName);
+    }
+
+    /**
+     * @param ascending
+     * @return primarySort
+     */
+    public PrimarySort ascending(final Boolean ascending) {
+        this.ascending = ascending;
+        return this;
+    }
+
+    @JsonIgnore
+    public Boolean getAscending() {
+        return ascending;
+    }
+
+    public Direction getDirection() {
+        if (ascending == null) {
+            return null;
+        }
+        return ascending ? Direction.asc : Direction.desc;
+    }
+
+    /**
+     * @deprecated for removal, use {@link #getField()} instead
+     */
+    @Deprecated
+    @JsonIgnore
+    public String getFieldName() {
+        return getField();
+    }
+
+    public String getField() {
+        return fieldName;
+    }
+
+    public enum Direction {
+        asc,
+        desc
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof PrimarySort)) return false;
+        PrimarySort that = (PrimarySort) o;
+        return Objects.equals(fieldName, that.fieldName) && Objects.equals(ascending, that.ascending);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(fieldName, ascending);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java
new file mode 100644
index 000000000..7d92d2768
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java
@@ -0,0 +1,67 @@
+package com.arangodb.entity.arangosearch;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * @author Michele Rastelli
+ * @since ArangoDB 3.10
+ */
+public final class SearchAliasIndex {
+    private final String collection;
+    private final String index;
+    private final OperationType operation;
+
+    /**
+     * @param collection The name of a collection.
+     * @param index      The name of an inverted index of the collection.
+     */
+    public SearchAliasIndex(String collection, String index) {
+        this(collection, index, null);
+    }
+
+    /**
+     * @param collection The name of a collection.
+     * @param index      The name of an inverted index of the collection.
+     * @param operation  Whether to add or remove the index to the stored indexes property of the View. (default "add")
+     */
+    @JsonCreator
+    public SearchAliasIndex(
+            @JsonProperty("collection") String collection,
+            @JsonProperty("index") String index,
+            @JsonProperty("operation") OperationType operation) {
+        this.collection = collection;
+        this.index = index;
+        this.operation = operation;
+    }
+
+    public String getCollection() {
+        return collection;
+    }
+
+    public String getIndex() {
+        return index;
+    }
+
+    public OperationType getOperation() {
+        return operation;
+    }
+
+    public enum OperationType {
+        add, del
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof SearchAliasIndex)) return false;
+        SearchAliasIndex that = (SearchAliasIndex) o;
+        return Objects.equals(collection, that.collection) && Objects.equals(index, that.index) && operation == that.operation;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(collection, index, operation);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java
new file mode 100644
index 000000000..208c17664
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java
@@ -0,0 +1,55 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2018 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+import com.arangodb.entity.ViewEntity;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Objects;
+
+/**
+ * @author Michele Rastelli
+ */
+public final class SearchAliasPropertiesEntity extends ViewEntity {
+
+    private final Collection<SearchAliasIndex> indexes = new ArrayList<>();
+
+    /**
+     * @return A list of inverted indexes to add to the View.
+     */
+    public Collection<SearchAliasIndex> getIndexes() {
+        return indexes;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof SearchAliasPropertiesEntity)) return false;
+        if (!super.equals(o)) return false;
+        SearchAliasPropertiesEntity that = (SearchAliasPropertiesEntity) o;
+        return Objects.equals(indexes, that.indexes);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), indexes);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/StoreValuesType.java b/core/src/main/java/com/arangodb/entity/arangosearch/StoreValuesType.java
new file mode 100644
index 000000000..23577a4f8
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/StoreValuesType.java
@@ -0,0 +1,42 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2018 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * @author Mark Vollmary
+ */
+public enum StoreValuesType {
+
+    /**
+     * Do not track values by the view
+     */
+    @JsonProperty("none")
+    NONE,
+
+    /**
+     * Track only value presence, to allow use of the EXISTS() function.
+     */
+    @JsonProperty("id")
+    ID
+
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java b/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java
new file mode 100644
index 000000000..d300b7f99
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java
@@ -0,0 +1,85 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch;
+
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * @author Michele Rastelli
+ * @since ArangoDB 3.7
+ */
+public final class StoredValue {
+
+    private final List<String> fields;
+    private final ArangoSearchCompression compression;
+    private final Boolean cache;
+
+    /**
+     * @param fields      A list of attribute paths. The . character denotes sub-attributes.
+     * @param compression Defines how to compress the attribute values.
+     * @param cache       Whether to cache stored values in memory. (Since ArangoDB 3.9.5, Enterprise Edition only)
+     */
+    @JsonCreator
+    public StoredValue(@JsonProperty("fields") List<String> fields,
+                       @JsonProperty("compression") ArangoSearchCompression compression,
+                       @JsonProperty("cache") Boolean cache) {
+        this.fields = fields;
+        this.compression = compression;
+        this.cache = cache;
+    }
+
+    public StoredValue(List<String> fields, ArangoSearchCompression compression) {
+        this(fields, compression, null);
+    }
+
+    public StoredValue(List<String> fields) {
+        this(fields, null);
+    }
+
+    public List<String> getFields() {
+        return fields;
+    }
+
+    public ArangoSearchCompression getCompression() {
+        return compression;
+    }
+
+    public Boolean getCache() {
+        return cache;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (!(o instanceof StoredValue)) return false;
+        StoredValue that = (StoredValue) o;
+        return Objects.equals(fields, that.fields) && compression == that.compression && Objects.equals(cache, that.cache);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(fields, compression, cache);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzer.java
new file mode 100644
index 000000000..1cb8b1b7d
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzer.java
@@ -0,0 +1,63 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch.analyzer;
+
+
+import com.arangodb.entity.arangosearch.AnalyzerType;
+
+import java.util.Objects;
+
+/**
+ * An Analyzer capable of running a restricted AQL query to perform data manipulation / filtering.
+ *
+ * @author Michele Rastelli
+ * @see <a href="https://www.arangodb.com/docs/stable/analyzers.html#aql">API Documentation</a>
+ * @since ArangoDB 3.8
+ */
+public final class AQLAnalyzer extends SearchAnalyzer {
+    private AQLAnalyzerProperties properties;
+
+    public AQLAnalyzer() {
+        setType(AnalyzerType.aql);
+    }
+
+    public AQLAnalyzerProperties getProperties() {
+        return properties;
+    }
+
+    public void setProperties(AQLAnalyzerProperties properties) {
+        this.properties = properties;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        if (!super.equals(o)) return false;
+        AQLAnalyzer that = (AQLAnalyzer) o;
+        return Objects.equals(properties, that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), properties);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzerProperties.java
new file mode 100644
index 000000000..acb3c2a29
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/AQLAnalyzerProperties.java
@@ -0,0 +1,143 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+
+package com.arangodb.entity.arangosearch.analyzer;
+
+
+import java.util.Objects;
+
+/**
+ * @author Michele Rastelli
+ */
+public final class AQLAnalyzerProperties {
+
+    private String queryString;
+    private Boolean collapsePositions;
+    private Boolean keepNull;
+    private Integer batchSize;
+    private Long memoryLimit;
+
+    private ReturnType returnType;
+
+    /**
+     * @return AQL query to be executed
+     */
+    public String getQueryString() {
+        return queryString;
+    }
+
+    public void setQueryString(String queryString) {
+        this.queryString = queryString;
+    }
+
+    /**
+     * @return <ul>
+     * <li>true: set the position to 0 for all members of the query result array</li>
+     * <li>false (default): set the position corresponding to the index of the result array member</li>
+     * </ul>
+     */
+    public Boolean getCollapsePositions() {
+        return collapsePositions;
+    }
+
+    public void setCollapsePositions(Boolean collapsePositions) {
+        this.collapsePositions = collapsePositions;
+    }
+
+    /**
+     * @return <ul>
+     * <li>true (default): treat null like an empty string</li>
+     * <li>false: discard nulls from View index. Can be used for index filtering (i.e. make your query return null for
+     * unwanted data). Note that empty results are always discarded.</li>
+     * </ul>
+     */
+    public Boolean getKeepNull() {
+        return keepNull;
+    }
+
+    public void setKeepNull(Boolean keepNull) {
+        this.keepNull = keepNull;
+    }
+
+    /**
+     * @return number between 1 and 1000 (default = 1) that determines the batch size for reading data from the query.
+     * In general, a single token is expected to be returned. However, if the query is expected to return many results,
+     * then increasing batchSize trades memory for performance.
+     */
+    public Integer getBatchSize() {
+        return batchSize;
+    }
+
+    public void setBatchSize(Integer batchSize) {
+        this.batchSize = batchSize;
+    }
+
+    /**
+     * @return memory limit for query execution in bytes. (default is 1048576 = 1Mb) Maximum is 33554432U (32Mb)
+     */
+    public Long getMemoryLimit() {
+        return memoryLimit;
+    }
+
+    public void setMemoryLimit(Long memoryLimit) {
+        this.memoryLimit = memoryLimit;
+    }
+
+    /**
+     * @return data type of the returned tokens. If the indicated type does not match the actual type then an implicit
+     * type conversion is applied.
+     */
+    public ReturnType getReturnType() {
+        return returnType;
+    }
+
+    public void setReturnType(ReturnType returnType) {
+        this.returnType = returnType;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        AQLAnalyzerProperties that = (AQLAnalyzerProperties) o;
+        return Objects.equals(queryString, that.queryString) && Objects.equals(collapsePositions,
+                that.collapsePositions) && Objects.equals(keepNull, that.keepNull) && Objects.equals(batchSize,
+                that.batchSize) && Objects.equals(memoryLimit, that.memoryLimit) && returnType == that.returnType;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(queryString, collapsePositions, keepNull, batchSize, memoryLimit, returnType);
+    }
+
+    public enum ReturnType {
+        string, number, bool
+    }
+
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzer.java
new file mode 100644
index 000000000..1f0fc2157
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzer.java
@@ -0,0 +1,64 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch.analyzer;
+
+
+import com.arangodb.entity.arangosearch.AnalyzerType;
+
+import java.util.Objects;
+
+/**
+ * An Analyzer capable of classifying tokens in the input text. It applies a user-provided supervised fastText word
+ * embedding model to classify the input text. It is able to classify individual tokens as well as entire inputs.
+ *
+ * @author Michele Rastelli
+ * @see <a href="https://www.arangodb.com/docs/stable/analyzers.html#classification">API Documentation</a>
+ * @since ArangoDB 3.10
+ */
+public final class ClassificationAnalyzer extends SearchAnalyzer {
+    public ClassificationAnalyzer() {
+        setType(AnalyzerType.classification);
+    }
+
+    private ClassificationAnalyzerProperties properties;
+
+    public ClassificationAnalyzerProperties getProperties() {
+        return properties;
+    }
+
+    public void setProperties(ClassificationAnalyzerProperties properties) {
+        this.properties = properties;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        if (!super.equals(o)) return false;
+        ClassificationAnalyzer that = (ClassificationAnalyzer) o;
+        return Objects.equals(properties, that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(super.hashCode(), properties);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzerProperties.java
new file mode 100644
index 000000000..00ee39c88
--- /dev/null
+++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/ClassificationAnalyzerProperties.java
@@ -0,0 +1,78 @@
+/*
+ * DISCLAIMER
+ *
+ * Copyright 2016 ArangoDB GmbH, Cologne, Germany
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright holder is ArangoDB GmbH, Cologne, Germany
+ */
+
+package com.arangodb.entity.arangosearch.analyzer;
+
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * @author Michele Rastelli
+ * @since ArangoDB 3.10
+ */
+public final class ClassificationAnalyzerProperties {
+
+    @JsonProperty("model_location")
+    private String modelLocation;
+
+    @JsonProperty("top_k")
+    private Integer topK;
+
+    private Double threshold;
+
+    public String getModelLocation() {
+        return modelLocation;
+    }
+
+    public void setModelLocation(String modelLocation) {
+        this.modelLocation = modelLocation;
+    }
+
+    public Integer getTopK() {
+        return topK;
+    }
+
+    public void setTopK(Integer topK) {
+        this.topK = topK;
+    }
+
+    public Double getThreshold() {
+        return threshold;
+    }
+
+    public void setThreshold(Double threshold) {
+        this.threshold = threshold;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ClassificationAnalyzerProperties that = (ClassificationAnalyzerProperties) o;
+        return Objects.equals(modelLocation, that.modelLocation) && Objects.equals(topK, that.topK) && Objects.equals(threshold, that.threshold);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(modelLocation, topK, threshold);
+    }
+}
diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzer.java
new file mode 100644
index 000000000..5030671bf --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzer.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of converting the input into a set of language-specific tokens. This makes comparisons follow the + * rules of the respective language, most notable in range queries against Views. 
+ * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.9 + */ +public final class CollationAnalyzer extends SearchAnalyzer { + private CollationAnalyzerProperties properties; + + public CollationAnalyzer() { + setType(AnalyzerType.collation); + } + + public CollationAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(CollationAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + CollationAnalyzer that = (CollationAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzerProperties.java new file mode 100644 index 000000000..428f8bc54 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/CollationAnalyzerProperties.java @@ -0,0 +1,61 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.9 + */ +public final class CollationAnalyzerProperties { + + private String locale; + + /** + * @return a locale in the format `language[_COUNTRY][.encoding][@variant]` (square brackets denote optional parts), + * e.g. `de.utf-8` or `en_US.utf-8`. Only UTF-8 encoding is meaningful in ArangoDB. + * The locale is forwarded to ICU without checks. An invalid locale does not prevent the creation of the Analyzer. + * @see + * Supported Languages + */ + public String getLocale() { + return locale; + } + + public void setLocale(String locale) { + this.locale = locale; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CollationAnalyzerProperties that = (CollationAnalyzerProperties) o; + return Objects.equals(locale, that.locale); + } + + @Override + public int hashCode() { + return Objects.hash(locale); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzer.java new file mode 100644 index 000000000..5e8aae6f7 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzer.java @@ -0,0 +1,63 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up delimited text into tokens as per RFC 4180 (without starting new records on + * newlines). + * + * @author Michele Rastelli + * @see API Documentation + */ +public final class DelimiterAnalyzer extends SearchAnalyzer { + private DelimiterAnalyzerProperties properties; + + public DelimiterAnalyzer() { + setType(AnalyzerType.delimiter); + } + + public DelimiterAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(DelimiterAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + DelimiterAnalyzer that = (DelimiterAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzerProperties.java new file mode 100644 index 000000000..21c8ef741 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/DelimiterAnalyzerProperties.java @@ -0,0 +1,56 @@ +/* + * DISCLAIMER + 
* + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class DelimiterAnalyzerProperties { + + private String delimiter; + + /** + * @return the delimiting character(s) + */ + public String getDelimiter() { + return delimiter; + } + + public void setDelimiter(String delimiter) { + this.delimiter = delimiter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DelimiterAnalyzerProperties that = (DelimiterAnalyzerProperties) o; + return Objects.equals(delimiter, that.delimiter); + } + + @Override + public int hashCode() { + return Objects.hash(delimiter); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/EdgeNgram.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/EdgeNgram.java new file mode 100644 index 000000000..35c4536da --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/EdgeNgram.java @@ -0,0 +1,81 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class EdgeNgram { + private long min; + private long max; + private boolean preserveOriginal; + + /** + * @return minimal n-gram length + */ + public long getMin() { + return min; + } + + public void setMin(long min) { + this.min = min; + } + + /** + * @return maximal n-gram length + */ + public long getMax() { + return max; + } + + public void setMax(long max) { + this.max = max; + } + + /** + * @return whether to include the original token even if its length is less than min or greater than max + */ + public boolean isPreserveOriginal() { + return preserveOriginal; + } + + public void setPreserveOriginal(boolean preserveOriginal) { + this.preserveOriginal = preserveOriginal; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EdgeNgram edgeNgram = (EdgeNgram) o; + return min == edgeNgram.min && + max == edgeNgram.max && + preserveOriginal == edgeNgram.preserveOriginal; + } + + @Override + public int hashCode() { + return Objects.hash(min, max, preserveOriginal); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoAnalyzerOptions.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoAnalyzerOptions.java new file mode 100644 index 000000000..a6a2f6762 --- /dev/null +++ 
b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoAnalyzerOptions.java @@ -0,0 +1,81 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class GeoAnalyzerOptions { + + private Integer maxCells; + private Integer minLevel; + private Integer maxLevel; + + /** + * @return maximum number of S2 cells (default: 20) + */ + public Integer getMaxCells() { + return maxCells; + } + + public void setMaxCells(Integer maxCells) { + this.maxCells = maxCells; + } + + /** + * @return the least precise S2 level (default: 4) + */ + public Integer getMinLevel() { + return minLevel; + } + + public void setMinLevel(Integer minLevel) { + this.minLevel = minLevel; + } + + /** + * @return the most precise S2 level (default: 23) + */ + public Integer getMaxLevel() { + return maxLevel; + } + + public void setMaxLevel(Integer maxLevel) { + this.maxLevel = maxLevel; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GeoAnalyzerOptions that = (GeoAnalyzerOptions) o; + return Objects.equals(maxCells, that.maxCells) && Objects.equals(minLevel, that.minLevel) && Objects.equals(maxLevel, that.maxLevel); + 
} + + @Override + public int hashCode() { + return Objects.hash(maxCells, minLevel, maxLevel); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzer.java new file mode 100644 index 000000000..3071ddc1c --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzer.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up a GeoJSON object into a set of indexable tokens for further usage with + * ArangoSearch Geo functions. 
+ * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.8 + */ +public final class GeoJSONAnalyzer extends SearchAnalyzer { + private GeoJSONAnalyzerProperties properties; + + public GeoJSONAnalyzer() { + setType(AnalyzerType.geojson); + } + + public GeoJSONAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(GeoJSONAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + GeoJSONAnalyzer that = (GeoJSONAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzerProperties.java new file mode 100644 index 000000000..2a7cc5594 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoJSONAnalyzerProperties.java @@ -0,0 +1,104 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class GeoJSONAnalyzerProperties { + + private GeoJSONAnalyzerType type; + private GeoAnalyzerOptions options; + private Boolean legacy; + + public GeoJSONAnalyzerType getType() { + return type; + } + + public void setType(GeoJSONAnalyzerType type) { + this.type = type; + } + + /** + * @return Options for fine-tuning geo queries {@link GeoJSONAnalyzerProperties}. These options should generally + * remain unchanged. + */ + public GeoAnalyzerOptions getOptions() { + return options; + } + + public void setOptions(GeoAnalyzerOptions options) { + this.options = options; + } + + /** + * @return This option controls how GeoJSON Polygons are interpreted (introduced in v3.10.5). + * - If `legacy` is `true`, the smaller of the two regions defined by a + * linear ring is interpreted as the interior of the ring and a ring can at most + * enclose half the Earth's surface. + * - If `legacy` is `false`, the area to the left of the boundary ring's + * path is considered to be the interior and a ring can enclose the entire + * surface of the Earth. + *

+ * The default is `false`. + */ + public Boolean getLegacy() { + return legacy; + } + + public void setLegacy(Boolean legacy) { + this.legacy = legacy; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GeoJSONAnalyzerProperties that = (GeoJSONAnalyzerProperties) o; + return type == that.type && Objects.equals(options, that.options) && Objects.equals(legacy, that.legacy); + } + + @Override + public int hashCode() { + return Objects.hash(type, options, legacy); + } + + public enum GeoJSONAnalyzerType { + + /** + * (default): index all GeoJSON geometry types (Point, Polygon etc.) + */ + shape, + + /** + * compute and only index the centroid of the input geometry + */ + centroid, + + /** + * only index GeoJSON objects of type Point, ignore all other geometry types + */ + point + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzer.java new file mode 100644 index 000000000..56c1726e6 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzer.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up JSON object describing a coordinate into a set of indexable tokens for further + * usage with ArangoSearch Geo functions. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.8 + */ +public final class GeoPointAnalyzer extends SearchAnalyzer { + private GeoPointAnalyzerProperties properties; + + public GeoPointAnalyzer() { + setType(AnalyzerType.geopoint); + } + + public GeoPointAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(GeoPointAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + GeoPointAnalyzer that = (GeoPointAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzerProperties.java new file mode 100644 index 000000000..a0c148216 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoPointAnalyzerProperties.java @@ -0,0 +1,87 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Arrays; +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class GeoPointAnalyzerProperties { + + private String[] latitude; + private String[] longitude; + private GeoAnalyzerOptions options; + + /** + * @return array of strings that describes the attribute path of the latitude value relative to the field for which + * the Analyzer is defined in the View + */ + public String[] getLatitude() { + return latitude; + } + + public void setLatitude(String[] latitude) { + this.latitude = latitude; + } + + /** + * @return array of strings that describes the attribute path of the longitude value relative to the field for which + * the Analyzer is defined in the View + */ + public String[] getLongitude() { + return longitude; + } + + public void setLongitude(String[] longitude) { + this.longitude = longitude; + } + + /** + * @return Options for fine-tuning geo queries {@link GeoPointAnalyzerProperties}. These options should generally + * remain unchanged. 
+ */ + public GeoAnalyzerOptions getOptions() { + return options; + } + + public void setOptions(GeoAnalyzerOptions options) { + this.options = options; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GeoPointAnalyzerProperties that = (GeoPointAnalyzerProperties) o; + return Arrays.equals(latitude, that.latitude) && Arrays.equals(longitude, that.longitude) && Objects.equals(options, that.options); + } + + @Override + public int hashCode() { + int result = Objects.hash(options); + result = 31 * result + Arrays.hashCode(latitude); + result = 31 * result + Arrays.hashCode(longitude); + return result; + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2Analyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2Analyzer.java new file mode 100644 index 000000000..1cee8128e --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2Analyzer.java @@ -0,0 +1,68 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up a GeoJSON object or coordinate array in [longitude, latitude] order into a set of + * indexable tokens for further usage with ArangoSearch Geo functions. + *

+ * The Analyzer is similar to {@link GeoJSONAnalyzer}, but it internally uses a format for storing the geo-spatial data + * that is more efficient. You can choose between different formats to make a tradeoff between the size on disk, the + * precision, and query performance. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.10.5 + */ +public final class GeoS2Analyzer extends SearchAnalyzer { + private GeoS2AnalyzerProperties properties; + + public GeoS2Analyzer() { + setType(AnalyzerType.geo_s2); + } + + public GeoS2AnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(GeoS2AnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + GeoS2Analyzer that = (GeoS2Analyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2AnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2AnalyzerProperties.java new file mode 100644 index 000000000..5a570b512 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/GeoS2AnalyzerProperties.java @@ -0,0 +1,119 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class GeoS2AnalyzerProperties { + + private GeoS2AnalyzerType type; + private GeoAnalyzerOptions options; + private GeoS2Format format; + + public GeoS2AnalyzerType getType() { + return type; + } + + public void setType(GeoS2AnalyzerType type) { + this.type = type; + } + + /** + * @return Options for fine-tuning geo queries {@link GeoS2AnalyzerProperties}. These options should generally + * remain unchanged. + */ + public GeoAnalyzerOptions getOptions() { + return options; + } + + public void setOptions(GeoAnalyzerOptions options) { + this.options = options; + } + + /** + * @return The internal binary representation to use for storing the geo-spatial data in an index. + */ + public GeoS2Format getFormat() { + return format; + } + + public void setFormat(GeoS2Format format) { + this.format = format; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GeoS2AnalyzerProperties that = (GeoS2AnalyzerProperties) o; + return type == that.type && Objects.equals(options, that.options) && format == that.format; + } + + @Override + public int hashCode() { + return Objects.hash(type, options, format); + } + + public enum GeoS2AnalyzerType { + + /** + * (default): index all GeoJSON geometry types (Point, Polygon etc.) 
+ */ + shape, + + /** + * compute and only index the centroid of the input geometry + */ + centroid, + + /** + * only index GeoJSON objects of type Point, ignore all other geometry types + */ + point + } + + public enum GeoS2Format { + /** + * Store each latitude and longitude value as an 8-byte floating-point value (16 bytes per coordinate pair). + * This format preserves numeric values exactly and is more compact than the VelocyPack format used by + * {@link GeoJSONAnalyzer}. (default) + */ + latLngDouble, + + /** + * Store each latitude and longitude value as an 4-byte integer value (8 bytes per coordinate pair). This is the + * most compact format but the precision is limited to approximately 1 to 10 centimeters. + */ + latLngInt, + + /** + * Store each longitude-latitude pair in the native format of Google S2 which is used for geo-spatial + * calculations (24 bytes per coordinate pair). This is not a particular compact format but it reduces the + * number of computations necessary when you execute geo-spatial queries. This format preserves numeric values + * exactly. + */ + s2Point + } +} diff --git a/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/IdentityAnalyzer.java similarity index 59% rename from src/main/java/com/arangodb/entity/CollectionRevisionEntity.java rename to core/src/main/java/com/arangodb/entity/arangosearch/analyzer/IdentityAnalyzer.java index ee34f1673..f5b7e91be 100644 --- a/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/IdentityAnalyzer.java @@ -1,37 +1,36 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CollectionRevisionEntity extends CollectionEntity { - - private String revision; - - public String getRevision() { - return revision; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +/** + * An Analyzer applying the identity transformation, i.e. returning the input unmodified. 
+ * + * @author Michele Rastelli + * @see API Documentation + */ +public final class IdentityAnalyzer extends SearchAnalyzer { + public IdentityAnalyzer() { + setType(AnalyzerType.identity); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzer.java new file mode 100644 index 000000000..d8e4d84e4 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzer.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer that computes so called MinHash signatures using a locality-sensitive hash function. It applies an + * Analyzer of your choice before the hashing, for example, to break up text into words. 
+ * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.10 + */ +public final class MinHashAnalyzer extends SearchAnalyzer { + public MinHashAnalyzer() { + setType(AnalyzerType.minhash); + } + + private MinHashAnalyzerProperties properties; + + public MinHashAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(MinHashAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + MinHashAnalyzer that = (MinHashAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzerProperties.java new file mode 100644 index 000000000..1c37c1c0f --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MinHashAnalyzerProperties.java @@ -0,0 +1,63 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class MinHashAnalyzerProperties { + + private SearchAnalyzer analyzer; + private Integer numHashes; + + public SearchAnalyzer getAnalyzer() { + return analyzer; + } + + public void setAnalyzer(SearchAnalyzer analyzer) { + this.analyzer = analyzer; + } + + public Integer getNumHashes() { + return numHashes; + } + + public void setNumHashes(Integer numHashes) { + this.numHashes = numHashes; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MinHashAnalyzerProperties that = (MinHashAnalyzerProperties) o; + return Objects.equals(analyzer, that.analyzer) && Objects.equals(numHashes, that.numHashes); + } + + @Override + public int hashCode() { + return Objects.hash(analyzer, numHashes); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzer.java new file mode 100644 index 000000000..3233e39c6 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzer.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up text into tokens using multiple delimiters. + * Unlike with the delimiter Analyzer, the multi_delimiter Analyzer does not support quoting fields. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.12 + */ +public final class MultiDelimiterAnalyzer extends SearchAnalyzer { + private MultiDelimiterAnalyzerProperties properties; + + public MultiDelimiterAnalyzer() { + setType(AnalyzerType.multi_delimiter); + } + + public MultiDelimiterAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(MultiDelimiterAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + MultiDelimiterAnalyzer that = (MultiDelimiterAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzerProperties.java new file mode 100644 index 000000000..ae8104f61 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/MultiDelimiterAnalyzerProperties.java @@ -0,0 +1,58 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.*; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.12 + */ +public final class MultiDelimiterAnalyzerProperties { + + private Collection delimiters = Collections.emptyList(); + + /** + * @return a list of strings of which each is considered as one delimiter that can be one or multiple characters + * long. The delimiters must not overlap, which means that a delimiter cannot be a prefix of another delimiter. + */ + public Collection getDelimiters() { + return delimiters; + } + + public void setDelimiters(String... 
delimiters) { + this.delimiters = Arrays.asList(delimiters); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MultiDelimiterAnalyzerProperties that = (MultiDelimiterAnalyzerProperties) o; + return Objects.equals(delimiters, that.delimiters); + } + + @Override + public int hashCode() { + return Objects.hash(delimiters); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzer.java new file mode 100644 index 000000000..84c9f4c39 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzer.java @@ -0,0 +1,67 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of producing n-grams from a specified input in a range of min..max (inclusive). Can optionally + * preserve the original input. + *

+ * This Analyzer type can be used to implement substring matching. Note that it slices the input based on bytes and not + * characters by default (streamType). The β€œbinary” mode supports single-byte characters only; multi-byte UTF-8 + * characters raise an Invalid UTF-8 sequence query error. + * + * @author Michele Rastelli + * @see API Documentation + */ +public final class NGramAnalyzer extends SearchAnalyzer { + private NGramAnalyzerProperties properties; + + public NGramAnalyzer() { + setType(AnalyzerType.ngram); + } + + public NGramAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(NGramAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + NGramAnalyzer that = (NGramAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzerProperties.java new file mode 100644 index 000000000..eba366945 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NGramAnalyzerProperties.java @@ -0,0 +1,135 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + +import java.util.Objects; + +/** + * An Analyzer capable of producing n-grams from a specified input in a range of min..max (inclusive). Can optionally + * preserve the original input. + *

+ * This Analyzer type can be used to implement substring matching. Note that it slices the input based on bytes and not + * characters by default (streamType). The β€œbinary” mode supports single-byte characters only; multi-byte UTF-8 + * characters raise an Invalid UTF-8 sequence query error. + * + * @author Michele Rastelli + * @see API Documentation + */ +public final class NGramAnalyzerProperties { + + private long min; + private long max; + private boolean preserveOriginal; + private String startMarker; + private String endMarker; + private StreamType streamType; + + public NGramAnalyzerProperties() { + startMarker = ""; + endMarker = ""; + streamType = StreamType.binary; + } + + /** + * @return minimum n-gram length + */ + public long getMin() { + return min; + } + + public void setMin(long min) { + this.min = min; + } + + /** + * @return maximum n-gram length + */ + public long getMax() { + return max; + } + + public void setMax(long max) { + this.max = max; + } + + /** + * @return true to include the original value as well + * false to produce the n-grams based on min and max only + */ + public boolean isPreserveOriginal() { + return preserveOriginal; + } + + public void setPreserveOriginal(boolean preserveOriginal) { + this.preserveOriginal = preserveOriginal; + } + + /** + * @return this value will be prepended to n-grams which include the beginning of the input. Can be used for + * matching prefixes. Choose a character or sequence as marker which does not occur in the input + */ + public String getStartMarker() { + return startMarker; + } + + public void setStartMarker(String startMarker) { + this.startMarker = startMarker; + } + + /** + * @return this value will be appended to n-grams which include the end of the input. Can be used for matching + * suffixes. Choose a character or sequence as marker which does not occur in the input. 
+ */ + public String getEndMarker() { + return endMarker; + } + + public void setEndMarker(String endMarker) { + this.endMarker = endMarker; + } + + public StreamType getStreamType() { + return streamType; + } + + public void setStreamType(StreamType streamType) { + this.streamType = streamType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NGramAnalyzerProperties that = (NGramAnalyzerProperties) o; + return min == that.min && + max == that.max && + preserveOriginal == that.preserveOriginal && + Objects.equals(startMarker, that.startMarker) && + Objects.equals(endMarker, that.endMarker) && + streamType == that.streamType; + } + + @Override + public int hashCode() { + return Objects.hash(min, max, preserveOriginal, startMarker, endMarker, streamType); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzer.java new file mode 100644 index 000000000..bbebd3785 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzer.java @@ -0,0 +1,66 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of finding nearest neighbors of tokens in the input. It applies a user-provided supervised + * fastText word embedding model to retrieve nearest neighbor tokens in the text. It is able to find neighbors of + * individual tokens as well as entire input strings. For entire input strings, the Analyzer will return nearest + * neighbors for each token within the input string. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.10 + */ +public final class NearestNeighborsAnalyzer extends SearchAnalyzer { + public NearestNeighborsAnalyzer() { + setType(AnalyzerType.nearest_neighbors); + } + + private NearestNeighborsAnalyzerProperties properties; + + public NearestNeighborsAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(NearestNeighborsAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + NearestNeighborsAnalyzer that = (NearestNeighborsAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzerProperties.java new file mode 100644 index 000000000..25f31ce7a --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NearestNeighborsAnalyzerProperties.java @@ -0,0 +1,69 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class NearestNeighborsAnalyzerProperties { + + @JsonProperty("model_location") + private String modelLocation; + + @JsonProperty("top_k") + private Integer topK; + + + public String getModelLocation() { + return modelLocation; + } + + public void setModelLocation(String modelLocation) { + this.modelLocation = modelLocation; + } + + public Integer getTopK() { + return topK; + } + + public void setTopK(Integer topK) { + this.topK = topK; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NearestNeighborsAnalyzerProperties that = (NearestNeighborsAnalyzerProperties) o; + return Objects.equals(modelLocation, that.modelLocation) && Objects.equals(topK, that.topK); + } + + @Override + public int hashCode() { + return Objects.hash(modelLocation, topK); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzer.java new file mode 100644 index 000000000..f51c66f6b --- /dev/null +++ 
b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzer.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of normalizing the text, treated as a single token, i.e. case conversion and accent removal. 
+ * + * @author Michele Rastelli + * @see API Documentation + */ +public final class NormAnalyzer extends SearchAnalyzer { + private NormAnalyzerProperties properties; + + public NormAnalyzer() { + setType(AnalyzerType.norm); + } + + public NormAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(NormAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + NormAnalyzer that = (NormAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzerProperties.java new file mode 100644 index 000000000..1e82ea36b --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/NormAnalyzerProperties.java @@ -0,0 +1,91 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class NormAnalyzerProperties { + + private String locale; + + private boolean accent; + + @JsonProperty("case") + private SearchAnalyzerCase analyzerCase; + + /** + * @return a locale in the format `language[_COUNTRY][.encoding][@variant]` (square brackets denote optional parts), + * e.g. `de.utf-8` or `en_US.utf-8`. Only UTF-8 encoding is meaningful in ArangoDB. + * @see + * Supported Languages + */ + public String getLocale() { + return locale; + } + + public void setLocale(String locale) { + this.locale = locale; + } + + /** + * @return true to preserve accented characters (default) + * false to convert accented characters to their base characters + */ + public boolean isAccent() { + return accent; + } + + public void setAccent(boolean accent) { + this.accent = accent; + } + + public SearchAnalyzerCase getAnalyzerCase() { + return analyzerCase; + } + + /** + * @param analyzerCase defaults to {@link SearchAnalyzerCase#none} + */ + public void setAnalyzerCase(SearchAnalyzerCase analyzerCase) { + this.analyzerCase = analyzerCase; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NormAnalyzerProperties that = (NormAnalyzerProperties) o; + return accent == that.accent && + Objects.equals(locale, that.locale) && + analyzerCase == that.analyzerCase; + } + + @Override + public int hashCode() { + return Objects.hash(locale, accent, analyzerCase); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzer.java new file mode 100644 index 000000000..ee5668723 --- /dev/null +++ 
b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzer.java @@ -0,0 +1,71 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of chaining effects of multiple Analyzers into one. The pipeline is a list of Analyzers, where + * the output of an Analyzer is passed to the next for further processing. + *

+ * LIMITATIONS: Analyzers of types {@link GeoPointAnalyzer} and {@link GeoJSONAnalyzer} cannot be used in + * pipelines and + * will make the creation fail. + *

+ * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.8 + */ +public final class PipelineAnalyzer extends SearchAnalyzer { + + private PipelineAnalyzerProperties properties; + + public PipelineAnalyzer() { + setType(AnalyzerType.pipeline); + } + + public PipelineAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(PipelineAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + PipelineAnalyzer that = (PipelineAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } + +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzerProperties.java new file mode 100644 index 000000000..bdcf4a91d --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/PipelineAnalyzerProperties.java @@ -0,0 +1,73 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.entity.arangosearch.analyzer; + +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class PipelineAnalyzerProperties { + private List pipeline = new LinkedList<>(); + + /** + * Appends the specified analyzer to the end of the pipeline. + *

+ * LIMITATIONS: Analyzers of types {@link GeoPointAnalyzer} and {@link GeoJSONAnalyzer} cannot be used in + * pipelines and will make the creation fail. + *

+ * + * @param analyzer analyzer to be appended + * @return this + */ + public PipelineAnalyzerProperties addAnalyzer(final SearchAnalyzer analyzer) { + pipeline.add(analyzer); + return this; + } + + /** + * @return an array of Analyzer with type and properties attributes + */ + public List getPipeline() { + return pipeline; + } + + public void setPipeline(List pipeline) { + this.pipeline = pipeline; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PipelineAnalyzerProperties that = (PipelineAnalyzerProperties) o; + return Objects.equals(pipeline, that.pipeline); + } + + @Override + public int hashCode() { + return Objects.hash(pipeline); + } + +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzer.java new file mode 100644 index 000000000..612c4b50c --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzer.java @@ -0,0 +1,113 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerFeature; +import com.arangodb.entity.arangosearch.AnalyzerType; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +/** + * @author Michele Rastelli + */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.EXTERNAL_PROPERTY, property = "type") +@JsonSubTypes({ + @JsonSubTypes.Type(name = "identity", value = IdentityAnalyzer.class), + @JsonSubTypes.Type(name = "delimiter", value = DelimiterAnalyzer.class), + @JsonSubTypes.Type(name = "multi_delimiter", value = MultiDelimiterAnalyzer.class), + @JsonSubTypes.Type(name = "stem", value = StemAnalyzer.class), + @JsonSubTypes.Type(name = "norm", value = NormAnalyzer.class), + @JsonSubTypes.Type(name = "ngram", value = NGramAnalyzer.class), + @JsonSubTypes.Type(name = "text", value = TextAnalyzer.class), + @JsonSubTypes.Type(name = "pipeline", value = PipelineAnalyzer.class), + @JsonSubTypes.Type(name = "stopwords", value = StopwordsAnalyzer.class), + @JsonSubTypes.Type(name = "aql", value = AQLAnalyzer.class), + @JsonSubTypes.Type(name = "geojson", value = GeoJSONAnalyzer.class), + @JsonSubTypes.Type(name = "geopoint", value = GeoPointAnalyzer.class), + @JsonSubTypes.Type(name = "geo_s2", value = GeoS2Analyzer.class), + @JsonSubTypes.Type(name = "segmentation", value = SegmentationAnalyzer.class), + @JsonSubTypes.Type(name = "collation", value = CollationAnalyzer.class), + @JsonSubTypes.Type(name = "classification", value = ClassificationAnalyzer.class), + @JsonSubTypes.Type(name = "nearest_neighbors", value = NearestNeighborsAnalyzer.class), + @JsonSubTypes.Type(name = "minhash", value = MinHashAnalyzer.class), + 
@JsonSubTypes.Type(name = "wildcard", value = WildcardAnalyzer.class) +}) +public abstract class SearchAnalyzer { + private String name; + private AnalyzerType type; + private Collection features; + + /** + * @return The Analyzer name. + */ + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * @return The Analyzer type. + */ + @JsonIgnore + public AnalyzerType getType() { + return type; + } + + public void setType(AnalyzerType type) { + this.type = type; + } + + /** + * @return The set of features to set on the Analyzer generated fields. + */ + public Set getFeatures() { + return features != null ? new HashSet<>(features) : null; + } + + public void setFeatures(Set features) { + this.features = features; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SearchAnalyzer that = (SearchAnalyzer) o; + return Objects.equals(getName(), that.getName()) + && getType() == that.getType() + && Objects.equals(getFeatures(), that.getFeatures()); + } + + @Override + public int hashCode() { + return Objects.hash(name, type, features); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzerCase.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzerCase.java new file mode 100644 index 000000000..32049f882 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SearchAnalyzerCase.java @@ -0,0 +1,42 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +/** + * @author Michele Rastelli + */ +public enum SearchAnalyzerCase { + /** + * convert to all lower-case characters + */ + lower, + + /** + * convert to all upper-case characters + */ + upper, + + /** + * to not change character case + */ + none +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzer.java new file mode 100644 index 000000000..fad2b16b3 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzer.java @@ -0,0 +1,66 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up the input text into tokens in a language-agnostic manner, making it suitable for + * mixed language strings. + * It can optionally preserve all non-whitespace or all characters instead of keeping alphanumeric characters only, as + * well as apply case conversion. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.9 + */ +public final class SegmentationAnalyzer extends SearchAnalyzer { + private SegmentationAnalyzerProperties properties; + + public SegmentationAnalyzer() { + setType(AnalyzerType.segmentation); + } + + public SegmentationAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(SegmentationAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SegmentationAnalyzer that = (SegmentationAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzerProperties.java new file mode 100644 index 000000000..f674d6e34 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/SegmentationAnalyzerProperties.java @@ -0,0 +1,78 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.9 + */ +public final class SegmentationAnalyzerProperties { + + @JsonProperty("break") + private BreakMode breakMode; + + @JsonProperty("case") + private SearchAnalyzerCase analyzerCase; + + public BreakMode getBreakMode() { + return breakMode; + } + + /** + * @param breakMode defaults to {@link BreakMode#alpha} + */ + public void setBreakMode(BreakMode breakMode) { + this.breakMode = breakMode; + } + + public SearchAnalyzerCase getAnalyzerCase() { + return analyzerCase; + } + + /** + * @param analyzerCase defaults to {@link SearchAnalyzerCase#lower} + */ + public void setAnalyzerCase(SearchAnalyzerCase analyzerCase) { + this.analyzerCase = analyzerCase; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SegmentationAnalyzerProperties that = (SegmentationAnalyzerProperties) o; + return breakMode == that.breakMode && analyzerCase == that.analyzerCase; + } + + @Override + public int hashCode() { + return Objects.hash(breakMode, analyzerCase); + } + + public enum BreakMode { + all, alpha, graphic + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzer.java new file mode 100644 
index 000000000..c567e3d91 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzer.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of stemming the text, treated as a single token, for supported languages. 
+ * + * @author Michele Rastelli + * @see API Documentation + */ +public final class StemAnalyzer extends SearchAnalyzer { + private StemAnalyzerProperties properties; + + public StemAnalyzer() { + setType(AnalyzerType.stem); + } + + public StemAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(StemAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + StemAnalyzer that = (StemAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzerProperties.java new file mode 100644 index 000000000..c5e26dfe0 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StemAnalyzerProperties.java @@ -0,0 +1,59 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class StemAnalyzerProperties { + + private String locale; + + /** + * @return a locale in the format `language[_COUNTRY][.encoding][@variant]` (square brackets denote optional parts), + * e.g. `de.utf-8` or `en_US.utf-8`. Only UTF-8 encoding is meaningful in ArangoDB. + * @see + * Supported Languages + */ + public String getLocale() { + return locale; + } + + public void setLocale(String locale) { + this.locale = locale; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StemAnalyzerProperties that = (StemAnalyzerProperties) o; + return Objects.equals(locale, that.locale); + } + + @Override + public int hashCode() { + return Objects.hash(locale); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzer.java new file mode 100644 index 000000000..09e9ee7e2 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzer.java @@ -0,0 +1,63 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of removing specified tokens from the input. + * + * @author Michele Rastelli + * @see API Documentation + * @since ArangoDB 3.8 + */ +public final class StopwordsAnalyzer extends SearchAnalyzer { + private StopwordsAnalyzerProperties properties; + + public StopwordsAnalyzer() { + setType(AnalyzerType.stopwords); + } + + public StopwordsAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(StopwordsAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + StopwordsAnalyzer that = (StopwordsAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzerProperties.java new file mode 100644 index 000000000..daa3d4e04 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StopwordsAnalyzerProperties.java @@ -0,0 +1,139 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.fasterxml.jackson.annotation.JsonIgnore; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * @author Michele Rastelli + */ +public final class StopwordsAnalyzerProperties { + + private final List stopwords; + private final boolean hex; + + public StopwordsAnalyzerProperties() { + stopwords = new ArrayList<>(); + hex = true; + } + + private static String stringToHex(String str) { + final StringBuilder hex = new StringBuilder(); + for (final char temp : str.toCharArray()) { + hex.append(Integer.toHexString(temp)); + } + return hex.toString(); + } + + private static String hexToString(String hex) { + final StringBuilder result = new StringBuilder(); + for (int i = 0; i < hex.length() - 1; i += 2) { + String tempInHex = hex.substring(i, (i + 2)); + int decimal = Integer.parseInt(tempInHex, 16); + result.append((char) decimal); + } + return result.toString(); + } + + public List getStopwords() { + return stopwords; + } + + /** + * @return list of verbatim strings that describe the tokens to be discarded. + */ + @JsonIgnore + public List getStopwordsAsStringList() { + if (hex) { + return stopwords.stream() + .map(StopwordsAnalyzerProperties::hexToString) + .collect(Collectors.toList()); + } else { + return stopwords; + } + } + + /** + * @return list of hex-encoded strings that describe the tokens to be discarded. 
+ */ + @JsonIgnore + public List getStopwordsAsHexList() { + if (hex) { + return stopwords; + } else { + return stopwords.stream() + .map(StopwordsAnalyzerProperties::stringToHex) + .collect(Collectors.toList()); + } + } + + /** + * @return if false each string in {@link #stopwords} is used as verbatim, if true as hex-encoded. + */ + public boolean getHex() { + return hex; + } + + /** + * @param value stopword as verbatim string + * @return this + */ + public StopwordsAnalyzerProperties addStopwordAsString(final String value) { + if (hex) { + stopwords.add(stringToHex(value)); + } else { + stopwords.add(value); + } + return this; + } + + /** + * @param value stopword as hex string + * @return this + */ + public StopwordsAnalyzerProperties addStopwordAsHex(final String value) { + if (hex) { + stopwords.add(value); + } else { + stopwords.add(hexToString(value)); + } + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StopwordsAnalyzerProperties that = (StopwordsAnalyzerProperties) o; + return hex == that.hex && Objects.equals(stopwords, that.stopwords); + } + + @Override + public int hashCode() { + return Objects.hash(stopwords, hex); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StreamType.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StreamType.java new file mode 100644 index 000000000..c99afd842 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/StreamType.java @@ -0,0 +1,37 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +/** + * @author Michele Rastelli + */ +public enum StreamType { + /** + * one byte is considered as one character (default) + */ + binary, + + /** + * one Unicode codepoint is treated as one character + */ + utf8 +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzer.java new file mode 100644 index 000000000..76aef1a48 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzer.java @@ -0,0 +1,63 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer capable of breaking up strings into individual words while also optionally filtering out stop-words, + * extracting word stems, applying case conversion and accent removal. + * + * @author Michele Rastelli + * @see API Documentation + */ +public final class TextAnalyzer extends SearchAnalyzer { + private TextAnalyzerProperties properties; + + public TextAnalyzer() { + setType(AnalyzerType.text); + } + + public TextAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(TextAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + TextAnalyzer that = (TextAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzerProperties.java new file mode 100644 index 000000000..29984622b --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/TextAnalyzerProperties.java @@ -0,0 +1,168 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class TextAnalyzerProperties { + + private String locale; + private boolean accent; + @JsonProperty("case") + private SearchAnalyzerCase analyzerCase; + private boolean stemming; + private EdgeNgram edgeNgram; + private List stopwords; + private String stopwordsPath; + + public TextAnalyzerProperties() { + stopwords = Collections.emptyList(); + } + + /** + * @return a locale in the format `language[_COUNTRY][.encoding][@variant]` (square brackets denote optional parts), + * e.g. `de.utf-8` or `en_US.utf-8`. Only UTF-8 encoding is meaningful in ArangoDB. 
+ * @see + * Supported Languages + */ + public String getLocale() { + return locale; + } + + public void setLocale(String locale) { + this.locale = locale; + } + + /** + * @return true to preserve accented characters (default) + * false to convert accented characters to their base characters + */ + public boolean isAccent() { + return accent; + } + + public void setAccent(boolean accent) { + this.accent = accent; + } + + public SearchAnalyzerCase getAnalyzerCase() { + return analyzerCase; + } + + /** + * @param analyzerCase defaults to {@link SearchAnalyzerCase#lower} + */ + public void setAnalyzerCase(SearchAnalyzerCase analyzerCase) { + this.analyzerCase = analyzerCase; + } + + /** + * @return true to apply stemming on returned words (default) + * false to leave the tokenized words as-is + */ + public boolean isStemming() { + return stemming; + } + + public void setStemming(boolean stemming) { + this.stemming = stemming; + } + + /** + * @return if present, then edge n-grams are generated for each token (word). That is, the start of the n-gram is + * anchored to the beginning of the token, whereas the ngram Analyzer would produce all possible substrings from a + * single input token (within the defined length restrictions). Edge n-grams can be used to cover word-based + * auto-completion queries with an index, for which you should set the following other options: + * - accent: false + * - case: {@link SearchAnalyzerCase#lower} + * - stemming: false + */ + public EdgeNgram getEdgeNgram() { + return edgeNgram; + } + + public void setEdgeNgram(EdgeNgram edgeNgram) { + this.edgeNgram = edgeNgram; + } + + /** + * @return an array of strings with words to omit from result. Default: load words from stopwordsPath. To disable + * stop-word filtering provide an empty array []. If both stopwords and stopwordsPath are provided then both word + * sources are combined. 
+ */ + public List getStopwords() { + return stopwords; + } + + public void setStopwords(List stopwords) { + this.stopwords = stopwords; + } + + /** + * @return path with a language sub-directory (e.g. en for a locale en_US.utf-8) containing files with words to + * omit. + * Each word has to be on a separate line. Everything after the first whitespace character on a line will be ignored + * and can be used for comments. The files can be named arbitrarily and have any file extension (or none). + *

+ * Default: if no path is provided then the value of the environment variable IRESEARCH_TEXT_STOPWORD_PATH is used + * to determine the path, or if it is undefined then the current working directory is assumed. If the stopwords + * attribute is provided then no stop-words are loaded from files, unless an explicit stopwordsPath is also + * provided. + *

+ * Note that if the stopwordsPath can not be accessed, is missing language sub-directories or has no files for a + * language required by an Analyzer, then the creation of a new Analyzer is refused. If such an issue is discovered + * for an existing Analyzer during startup then the server will abort with a fatal error. + */ + public String getStopwordsPath() { + return stopwordsPath; + } + + public void setStopwordsPath(String stopwordsPath) { + this.stopwordsPath = stopwordsPath; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TextAnalyzerProperties that = (TextAnalyzerProperties) o; + return accent == that.accent && + stemming == that.stemming && + Objects.equals(locale, that.locale) && + analyzerCase == that.analyzerCase && + Objects.equals(edgeNgram, that.edgeNgram) && + Objects.equals(stopwords, that.stopwords) && + Objects.equals(stopwordsPath, that.stopwordsPath); + } + + @Override + public int hashCode() { + return Objects.hash(locale, accent, analyzerCase, stemming, edgeNgram, stopwords, stopwordsPath); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzer.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzer.java new file mode 100644 index 000000000..2f90ef066 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzer.java @@ -0,0 +1,66 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import com.arangodb.entity.arangosearch.AnalyzerType; + +import java.util.Objects; + +/** + * An Analyzer that creates n-grams to enable fast partial matching for wildcard queries if you have large string + * values, especially if you want to search for suffixes or substrings in the middle of strings (infixes) as opposed to + * prefixes. + * It can apply an Analyzer of your choice before creating the n-grams, for example, to normalize text for + * case-insensitive and accent-insensitive search. 
+ * + * @author Michele Rastelli + * @see API Documentation + */ +public final class WildcardAnalyzer extends SearchAnalyzer { + private WildcardAnalyzerProperties properties; + + public WildcardAnalyzer() { + setType(AnalyzerType.wildcard); + } + + public WildcardAnalyzerProperties getProperties() { + return properties; + } + + public void setProperties(WildcardAnalyzerProperties properties) { + this.properties = properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + WildcardAnalyzer that = (WildcardAnalyzer) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), properties); + } +} diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzerProperties.java b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzerProperties.java new file mode 100644 index 000000000..84042de08 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/arangosearch/analyzer/WildcardAnalyzerProperties.java @@ -0,0 +1,68 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity.arangosearch.analyzer; + + +import java.util.Objects; + +/** + * @author Michele Rastelli + */ +public final class WildcardAnalyzerProperties { + + private Integer ngramSize; + private SearchAnalyzer analyzer; + + /** + * @return unsigned integer for the n-gram length, needs to be at least 2 + */ + public Integer getNgramSize() { + return ngramSize; + } + + /** + * @param ngramSize unsigned integer for the n-gram length, needs to be at least 2 + */ + public void setNgramSize(Integer ngramSize) { + this.ngramSize = ngramSize; + } + + public SearchAnalyzer getAnalyzer() { + return analyzer; + } + + public void setAnalyzer(SearchAnalyzer analyzer) { + this.analyzer = analyzer; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WildcardAnalyzerProperties that = (WildcardAnalyzerProperties) o; + return Objects.equals(ngramSize, that.ngramSize) && Objects.equals(analyzer, that.analyzer); + } + + @Override + public int hashCode() { + return Objects.hash(ngramSize, analyzer); + } +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoCollectionAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoCollectionAsyncImpl.java new file mode 100644 index 000000000..17fb9a8b2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoCollectionAsyncImpl.java @@ -0,0 +1,516 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoCollectionAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabaseAsync; +import com.arangodb.entity.*; +import com.arangodb.model.*; +import com.arangodb.util.RawData; + +import java.util.Collection; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.*; +import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class ArangoCollectionAsyncImpl extends InternalArangoCollection implements ArangoCollectionAsync { + + private final ArangoDatabaseAsync db; + + protected ArangoCollectionAsyncImpl(final ArangoDatabaseAsyncImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabaseAsync db() { + return db; + } + + @Override + public CompletableFuture> insertDocument(final Object value) { + return executorAsync().execute(() -> insertDocumentRequest(value, new DocumentCreateOptions()), + constructParametricType(DocumentCreateEntity.class, Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture> insertDocument(final T value, final DocumentCreateOptions options) { + return insertDocument(value, options, (Class) value.getClass()); + } + + @Override + public CompletableFuture> insertDocument(final T 
value, final DocumentCreateOptions options, + final Class type) { + return executorAsync().execute(() -> insertDocumentRequest(value, options), + constructParametricType(DocumentCreateEntity.class, type)); + } + + @Override + public CompletableFuture>> insertDocuments(RawData values) { + return executorAsync() + .execute(() -> insertDocumentsRequest(values, new DocumentCreateOptions()), + insertDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture>> insertDocuments(RawData values, + DocumentCreateOptions options) { + return executorAsync() + .execute(() -> insertDocumentsRequest(values, options), + insertDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public CompletableFuture>> insertDocuments(final Iterable values) { + return insertDocuments(values, new DocumentCreateOptions()); + } + + @Override + public CompletableFuture>> insertDocuments( + final Iterable values, final DocumentCreateOptions options) { + return executorAsync() + .execute(() -> insertDocumentsRequest(values, options), + insertDocumentsResponseDeserializer(Void.class)); + } + + @Override + public CompletableFuture>> insertDocuments(Iterable values, + DocumentCreateOptions options, + Class type) { + return executorAsync() + .execute(() -> insertDocumentsRequest(values, options), insertDocumentsResponseDeserializer(type)); + } + + @Override + public CompletableFuture importDocuments(final Iterable values) { + return importDocuments(values, new DocumentImportOptions()); + } + + @Override + public CompletableFuture importDocuments(final Iterable values, final DocumentImportOptions options) { + return executorAsync().execute(() -> importDocumentsRequest(values, options), DocumentImportEntity.class); + } + + @Override + public CompletableFuture importDocuments(RawData values) { + return importDocuments(values, new DocumentImportOptions()); + } + + @Override + public CompletableFuture 
importDocuments(RawData values, DocumentImportOptions options) { + return executorAsync().execute(() -> importDocumentsRequest(values, options), DocumentImportEntity.class); + } + + @Override + public CompletableFuture getDocument(final String key, final Class type) { + return getDocument(key, type, new DocumentReadOptions()); + } + + @Override + public CompletableFuture getDocument(final String key, final Class type, final DocumentReadOptions options) { + return executorAsync().execute(() -> getDocumentRequest(key, options), getDocumentResponseDeserializer(type)) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 304) + || matches(aEx, 404, ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(aEx, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture> getDocuments(final Iterable keys, final Class type) { + return getDocuments(keys, type, new DocumentReadOptions()); + } + + @Override + public CompletableFuture> getDocuments( + final Iterable keys, final Class type, final DocumentReadOptions options) { + return executorAsync().execute(() -> getDocumentsRequest(keys, options), getDocumentsResponseDeserializer(type)); + } + + @Override + public CompletableFuture> replaceDocument(final String key, final Object value) { + return executorAsync().execute(() -> replaceDocumentRequest(key, value, new DocumentReplaceOptions()), + constructParametricType(DocumentUpdateEntity.class, Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture> replaceDocument( + final String key, final T value, final DocumentReplaceOptions options) { + return replaceDocument(key, value, options, (Class) value.getClass()); + } + + @Override + public CompletableFuture> replaceDocument(String key, T value, DocumentReplaceOptions 
options, + Class type) { + return executorAsync().execute(() -> replaceDocumentRequest(key, value, options), + constructParametricType(DocumentUpdateEntity.class, type)); + } + + @Override + public CompletableFuture>> replaceDocuments(RawData values) { + return executorAsync().execute(() -> replaceDocumentsRequest(values, new DocumentReplaceOptions()), + replaceDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture>> replaceDocuments(RawData values, + DocumentReplaceOptions options) { + return executorAsync().execute(() -> replaceDocumentsRequest(values, options), + replaceDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public CompletableFuture>> replaceDocuments(final Iterable values) { + return replaceDocuments(values, new DocumentReplaceOptions()); + } + + @Override + public CompletableFuture>> replaceDocuments( + final Iterable values, final DocumentReplaceOptions options) { + return executorAsync().execute(() -> replaceDocumentsRequest(values, options), + replaceDocumentsResponseDeserializer(Void.class)); + } + + @Override + public CompletableFuture>> replaceDocuments(Iterable values, + DocumentReplaceOptions options, + Class type) { + return executorAsync().execute(() -> replaceDocumentsRequest(values, options), replaceDocumentsResponseDeserializer(type)); + } + + @Override + public CompletableFuture> updateDocument(final String key, final Object value) { + return updateDocument(key, value, new DocumentUpdateOptions(), Void.class); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture> updateDocument( + final String key, final T value, final DocumentUpdateOptions options) { + return updateDocument(key, value, options, (Class) value.getClass()); + } + + @Override + public CompletableFuture> updateDocument( + final String key, final Object value, final DocumentUpdateOptions options, final Class returnType) { + return 
executorAsync().execute(() -> updateDocumentRequest(key, value, options), + constructParametricType(DocumentUpdateEntity.class, returnType)); + } + + @Override + public CompletableFuture>> updateDocuments(RawData values) { + return executorAsync() + .execute(() -> updateDocumentsRequest(values, new DocumentUpdateOptions()), + updateDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture>> updateDocuments(RawData values, + DocumentUpdateOptions options) { + return executorAsync() + .execute(() -> updateDocumentsRequest(values, options), + updateDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public CompletableFuture>> updateDocuments(final Iterable values) { + return updateDocuments(values, new DocumentUpdateOptions()); + } + + @Override + public CompletableFuture>> updateDocuments( + final Iterable values, final DocumentUpdateOptions options) { + return updateDocuments(values, options, Void.class); + } + + @Override + public CompletableFuture>> updateDocuments( + final Iterable values, final DocumentUpdateOptions options, final Class returnType) { + return executorAsync() + .execute(() -> updateDocumentsRequest(values, options), updateDocumentsResponseDeserializer(returnType)); + } + + @Override + public CompletableFuture> deleteDocument(final String key) { + return deleteDocument(key, new DocumentDeleteOptions()); + } + + @Override + public CompletableFuture> deleteDocument(String key, DocumentDeleteOptions options) { + return deleteDocument(key, options, Void.class); + } + + @Override + public CompletableFuture> deleteDocument( + final String key, final DocumentDeleteOptions options, final Class type) { + return executorAsync().execute(() -> deleteDocumentRequest(key, options), + constructParametricType(DocumentDeleteEntity.class, type)); + } + + @Override + public CompletableFuture>> deleteDocuments(RawData values) { + return executorAsync().execute(() -> 
deleteDocumentsRequest(values, new DocumentDeleteOptions()), + deleteDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public CompletableFuture>> deleteDocuments(RawData values, + DocumentDeleteOptions options) { + return executorAsync().execute(() -> deleteDocumentsRequest(values, options), + deleteDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public CompletableFuture>> deleteDocuments(final Iterable values) { + return deleteDocuments(values, new DocumentDeleteOptions()); + } + + @Override + public CompletableFuture>> deleteDocuments( + final Iterable values, final DocumentDeleteOptions options) { + return deleteDocuments(values, options, Void.class); + } + + @Override + public CompletableFuture>> deleteDocuments( + final Iterable values, final DocumentDeleteOptions options, final Class type) { + return executorAsync().execute(() -> deleteDocumentsRequest(values, options), deleteDocumentsResponseDeserializer(type)); + } + + @Override + public CompletableFuture documentExists(final String key) { + return documentExists(key, new DocumentExistsOptions()); + } + + @Override + public CompletableFuture documentExists(final String key, final DocumentExistsOptions options) { + return executorAsync().execute(() -> documentExistsRequest(key, options), Void.class) + .thenApply(it -> true) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 304) + || matches(aEx, 404) + || matches(aEx, 412) + ) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture getIndex(final String id) { + return executorAsync().execute(() -> getIndexRequest(id), IndexEntity.class); + } + + @Override + public CompletableFuture getInvertedIndex(String id) { + return executorAsync().execute(() -> getIndexRequest(id), InvertedIndexEntity.class); + } + + @Override + public CompletableFuture deleteIndex(final String id) { + return executorAsync().execute(() -> deleteIndexRequest(id), deleteIndexResponseDeserializer()); + } + + @Override + public CompletableFuture ensurePersistentIndex(final Iterable fields, final PersistentIndexOptions options) { + return executorAsync().execute(() -> createPersistentIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture ensureInvertedIndex(final InvertedIndexOptions options) { + return executorAsync().execute(() -> createInvertedIndexRequest(options), InvertedIndexEntity.class); + } + + @Override + public CompletableFuture ensureGeoIndex(final Iterable fields, final GeoIndexOptions options) { + return executorAsync().execute(() -> createGeoIndexRequest(fields, options), IndexEntity.class); + } + + @Deprecated + @Override + public CompletableFuture ensureFulltextIndex(final Iterable fields, final FulltextIndexOptions options) { + return executorAsync().execute(() -> createFulltextIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture ensureTtlIndex(final Iterable fields, final TtlIndexOptions options) { + return executorAsync().execute(() -> createTtlIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture ensureZKDIndex(final Iterable fields, final ZKDIndexOptions options) { + return 
executorAsync().execute(() -> createZKDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture ensureMDIndex(final Iterable fields, final MDIndexOptions options) { + return executorAsync().execute(() -> createMDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture ensureMDPrefixedIndex(final Iterable fields, final MDPrefixedIndexOptions options) { + return executorAsync().execute(() -> createMDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public CompletableFuture> getIndexes() { + return executorAsync().execute(this::getIndexesRequest, getIndexesResponseDeserializer()); + } + + @Override + public CompletableFuture> getInvertedIndexes() { + return executorAsync().execute(this::getIndexesRequest, getInvertedIndexesResponseDeserializer()); + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture truncate() { + return truncate(null); + } + + @Override + public CompletableFuture truncate(CollectionTruncateOptions options) { + return executorAsync().execute(() -> truncateRequest(options), CollectionEntity.class); + } + + @Override + public CompletableFuture count() { + return count(null); + } + + @Override + public CompletableFuture count(CollectionCountOptions options) { + return executorAsync().execute(() -> countRequest(options), CollectionPropertiesEntity.class); + } + + @Override + public CompletableFuture create() { + return db().createCollection(name()); + } + + @Override + public CompletableFuture create(final CollectionCreateOptions options) { + return db().createCollection(name(), options); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(() -> dropRequest(null), Void.class); + } + + @Override + public CompletableFuture drop(final boolean isSystem) { + return executorAsync().execute(() -> dropRequest(isSystem), Void.class); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, CollectionEntity.class); + } + + @Override + public CompletableFuture getProperties() { + return executorAsync().execute(this::getPropertiesRequest, CollectionPropertiesEntity.class); + } + + @Override + public CompletableFuture changeProperties(final CollectionPropertiesOptions options) { + return executorAsync().execute(() -> changePropertiesRequest(options), CollectionPropertiesEntity.class); + } + + @Override + public CompletableFuture rename(final String newName) { + return executorAsync().execute(() -> renameRequest(newName), CollectionEntity.class); + } + + @Override + public CompletableFuture 
getResponsibleShard(final Object value) { + return executorAsync().execute(() -> responsibleShardRequest(value), ShardEntity.class); + } + + @Override + public CompletableFuture getRevision() { + return executorAsync().execute(this::getRevisionRequest, CollectionRevisionEntity.class); + } + + @Override + public CompletableFuture grantAccess(final String user, final Permissions permissions) { + return executorAsync().execute(() -> grantAccessRequest(user, permissions), Void.class); + } + + @Override + public CompletableFuture revokeAccess(final String user) { + return executorAsync().execute(() -> grantAccessRequest(user, Permissions.NONE), Void.class); + } + + @Override + public CompletableFuture resetAccess(final String user) { + return executorAsync().execute(() -> resetAccessRequest(user), Void.class); + } + + @Override + public CompletableFuture getPermissions(final String user) { + return executorAsync().execute(() -> getPermissionsRequest(user), getPermissionsResponseDeserialzer()); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java b/core/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java new file mode 100644 index 000000000..40cb4276e --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoCollectionImpl.java @@ -0,0 +1,504 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoCollection; +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabase; +import com.arangodb.entity.*; +import com.arangodb.model.*; +import com.arangodb.util.RawData; + +import java.util.Collection; + +import static com.arangodb.internal.ArangoErrors.*; +import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class ArangoCollectionImpl extends InternalArangoCollection implements ArangoCollection { + + private final ArangoDatabase db; + + protected ArangoCollectionImpl(final ArangoDatabaseImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabase db() { + return db; + } + + @Override + public DocumentCreateEntity insertDocument(final Object value) { + return executorSync().execute(insertDocumentRequest(value, new DocumentCreateOptions()), + constructParametricType(DocumentCreateEntity.class, Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public DocumentCreateEntity insertDocument(final T value, final DocumentCreateOptions options) { + return insertDocument(value, options, (Class) value.getClass()); + } + + @Override + public DocumentCreateEntity insertDocument(final T value, final DocumentCreateOptions options, + final Class type) { + return executorSync().execute(insertDocumentRequest(value, options), + constructParametricType(DocumentCreateEntity.class, type)); + } + + @Override + public MultiDocumentEntity> insertDocuments(RawData values) { + return executorSync() + .execute(insertDocumentsRequest(values, new DocumentCreateOptions()), + insertDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public MultiDocumentEntity> insertDocuments(RawData values, + DocumentCreateOptions options) { + return 
executorSync() + .execute(insertDocumentsRequest(values, options), + insertDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public MultiDocumentEntity> insertDocuments(final Iterable values) { + return insertDocuments(values, new DocumentCreateOptions()); + } + + @Override + public MultiDocumentEntity> insertDocuments( + final Iterable values, final DocumentCreateOptions options) { + return executorSync() + .execute(insertDocumentsRequest(values, options), + insertDocumentsResponseDeserializer(Void.class)); + } + + @Override + public MultiDocumentEntity> insertDocuments(Iterable values, + DocumentCreateOptions options, + Class type) { + return executorSync() + .execute(insertDocumentsRequest(values, options), insertDocumentsResponseDeserializer(type)); + } + + @Override + public DocumentImportEntity importDocuments(final Iterable values) { + return importDocuments(values, new DocumentImportOptions()); + } + + @Override + public DocumentImportEntity importDocuments(final Iterable values, final DocumentImportOptions options) { + return executorSync().execute(importDocumentsRequest(values, options), DocumentImportEntity.class); + } + + @Override + public DocumentImportEntity importDocuments(RawData values) { + return importDocuments(values, new DocumentImportOptions()); + } + + @Override + public DocumentImportEntity importDocuments(RawData values, DocumentImportOptions options) { + return executorSync().execute(importDocumentsRequest(values, options), DocumentImportEntity.class); + } + + @Override + public T getDocument(final String key, final Class type) { + return getDocument(key, type, new DocumentReadOptions()); + } + + @Override + public T getDocument(final String key, final Class type, final DocumentReadOptions options) { + try { + return executorSync().execute(getDocumentRequest(key, options), getDocumentResponseDeserializer(type)); + } catch (final ArangoDBException e) { + if (matches(e, 304) + || matches(e, 404, 
ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(e, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + throw e; + } + } + + @Override + public MultiDocumentEntity getDocuments(final Iterable keys, final Class type) { + return getDocuments(keys, type, new DocumentReadOptions()); + } + + @Override + public MultiDocumentEntity getDocuments( + final Iterable keys, final Class type, final DocumentReadOptions options) { + return executorSync().execute(getDocumentsRequest(keys, options), getDocumentsResponseDeserializer(type)); + } + + @Override + public DocumentUpdateEntity replaceDocument(final String key, final Object value) { + return executorSync().execute(replaceDocumentRequest(key, value, new DocumentReplaceOptions()), + constructParametricType(DocumentUpdateEntity.class, Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public DocumentUpdateEntity replaceDocument( + final String key, final T value, final DocumentReplaceOptions options) { + return replaceDocument(key, value, options, (Class) value.getClass()); + } + + @Override + public DocumentUpdateEntity replaceDocument(String key, T value, DocumentReplaceOptions options, + Class type) { + return executorSync().execute(replaceDocumentRequest(key, value, options), + constructParametricType(DocumentUpdateEntity.class, type)); + } + + @Override + public MultiDocumentEntity> replaceDocuments(RawData values) { + return executorSync().execute(replaceDocumentsRequest(values, new DocumentReplaceOptions()), + replaceDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public MultiDocumentEntity> replaceDocuments(RawData values, + DocumentReplaceOptions options) { + return executorSync().execute(replaceDocumentsRequest(values, options), + replaceDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public MultiDocumentEntity> replaceDocuments(final Iterable values) { + return replaceDocuments(values, new DocumentReplaceOptions()); + } + 
+ @Override + public MultiDocumentEntity> replaceDocuments( + final Iterable values, final DocumentReplaceOptions options) { + return executorSync().execute(replaceDocumentsRequest(values, options), + replaceDocumentsResponseDeserializer(Void.class)); + } + + @Override + public MultiDocumentEntity> replaceDocuments(Iterable values, + DocumentReplaceOptions options, + Class type) { + return executorSync().execute(replaceDocumentsRequest(values, options), replaceDocumentsResponseDeserializer(type)); + } + + @Override + public DocumentUpdateEntity updateDocument(final String key, final Object value) { + return updateDocument(key, value, new DocumentUpdateOptions(), Void.class); + } + + @Override + @SuppressWarnings("unchecked") + public DocumentUpdateEntity updateDocument( + final String key, final T value, final DocumentUpdateOptions options) { + return updateDocument(key, value, options, (Class) value.getClass()); + } + + @Override + public DocumentUpdateEntity updateDocument( + final String key, final Object value, final DocumentUpdateOptions options, final Class returnType) { + return executorSync().execute(updateDocumentRequest(key, value, options), + constructParametricType(DocumentUpdateEntity.class, returnType)); + } + + @Override + public MultiDocumentEntity> updateDocuments(RawData values) { + return executorSync() + .execute(updateDocumentsRequest(values, new DocumentUpdateOptions()), + updateDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public MultiDocumentEntity> updateDocuments(RawData values, + DocumentUpdateOptions options) { + return executorSync() + .execute(updateDocumentsRequest(values, options), + updateDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public MultiDocumentEntity> updateDocuments(final Iterable values) { + return updateDocuments(values, new DocumentUpdateOptions()); + } + + @Override + public MultiDocumentEntity> updateDocuments( + final Iterable 
values, final DocumentUpdateOptions options) { + return updateDocuments(values, options, Void.class); + } + + @Override + public MultiDocumentEntity> updateDocuments( + final Iterable values, final DocumentUpdateOptions options, final Class returnType) { + return executorSync() + .execute(updateDocumentsRequest(values, options), updateDocumentsResponseDeserializer(returnType)); + } + + @Override + public DocumentDeleteEntity deleteDocument(final String key) { + return deleteDocument(key, new DocumentDeleteOptions()); + } + + @Override + public DocumentDeleteEntity deleteDocument(String key, DocumentDeleteOptions options) { + return deleteDocument(key, options, Void.class); + } + + @Override + public DocumentDeleteEntity deleteDocument( + final String key, final DocumentDeleteOptions options, final Class type) { + return executorSync().execute(deleteDocumentRequest(key, options), + constructParametricType(DocumentDeleteEntity.class, type)); + } + + @Override + public MultiDocumentEntity> deleteDocuments(RawData values) { + return executorSync().execute(deleteDocumentsRequest(values, new DocumentDeleteOptions()), + deleteDocumentsResponseDeserializer(Void.class)); + } + + @Override + @SuppressWarnings("unchecked") + public MultiDocumentEntity> deleteDocuments(RawData values, + DocumentDeleteOptions options) { + return executorSync().execute(deleteDocumentsRequest(values, options), + deleteDocumentsResponseDeserializer((Class) values.getClass())); + } + + @Override + public MultiDocumentEntity> deleteDocuments(final Iterable values) { + return deleteDocuments(values, new DocumentDeleteOptions()); + } + + @Override + public MultiDocumentEntity> deleteDocuments( + final Iterable values, final DocumentDeleteOptions options) { + return deleteDocuments(values, options, Void.class); + } + + @Override + public MultiDocumentEntity> deleteDocuments( + final Iterable values, final DocumentDeleteOptions options, final Class type) { + return 
executorSync().execute(deleteDocumentsRequest(values, options), deleteDocumentsResponseDeserializer(type)); + } + + @Override + public Boolean documentExists(final String key) { + return documentExists(key, new DocumentExistsOptions()); + } + + @Override + public Boolean documentExists(final String key, final DocumentExistsOptions options) { + try { + executorSync().execute(documentExistsRequest(key, options), Void.class); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 304) + || matches(e, 404) + || matches(e, 412) + ) { + return false; + } + throw e; + } + } + + @Override + public IndexEntity getIndex(final String id) { + return executorSync().execute(getIndexRequest(id), IndexEntity.class); + } + + @Override + public InvertedIndexEntity getInvertedIndex(String id) { + return executorSync().execute(getIndexRequest(id), InvertedIndexEntity.class); + } + + @Override + public String deleteIndex(final String id) { + return executorSync().execute(deleteIndexRequest(id), deleteIndexResponseDeserializer()); + } + + @Override + public IndexEntity ensurePersistentIndex(final Iterable fields, final PersistentIndexOptions options) { + return executorSync().execute(createPersistentIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public InvertedIndexEntity ensureInvertedIndex(final InvertedIndexOptions options) { + return executorSync().execute(createInvertedIndexRequest(options), InvertedIndexEntity.class); + } + + @Override + public IndexEntity ensureGeoIndex(final Iterable fields, final GeoIndexOptions options) { + return executorSync().execute(createGeoIndexRequest(fields, options), IndexEntity.class); + } + + @Deprecated + @Override + public IndexEntity ensureFulltextIndex(final Iterable fields, final FulltextIndexOptions options) { + return executorSync().execute(createFulltextIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public IndexEntity ensureTtlIndex(final Iterable fields, final TtlIndexOptions 
options) { + return executorSync().execute(createTtlIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public IndexEntity ensureZKDIndex(final Iterable fields, final ZKDIndexOptions options) { + return executorSync().execute(createZKDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public IndexEntity ensureMDIndex(final Iterable fields, final MDIndexOptions options) { + return executorSync().execute(createMDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public IndexEntity ensureMDPrefixedIndex(final Iterable fields, final MDPrefixedIndexOptions options) { + return executorSync().execute(createMDIndexRequest(fields, options), IndexEntity.class); + } + + @Override + public Collection getIndexes() { + return executorSync().execute(getIndexesRequest(), getIndexesResponseDeserializer()); + } + + @Override + public Collection getInvertedIndexes() { + return executorSync().execute(getIndexesRequest(), getInvertedIndexesResponseDeserializer()); + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public CollectionEntity truncate() { + return truncate(null); + } + + @Override + public CollectionEntity truncate(CollectionTruncateOptions options) { + return executorSync().execute(truncateRequest(options), CollectionEntity.class); + } + + @Override + public CollectionPropertiesEntity count() { + return count(null); + } + + @Override + public CollectionPropertiesEntity count(CollectionCountOptions options) { + return executorSync().execute(countRequest(options), CollectionPropertiesEntity.class); + } + + @Override + public CollectionEntity create() { + return db().createCollection(name()); + } + + @Override + public CollectionEntity create(final CollectionCreateOptions options) { + return db().createCollection(name(), options); + } 
+ + @Override + public void drop() { + executorSync().execute(dropRequest(null), Void.class); + } + + @Override + public void drop(final boolean isSystem) { + executorSync().execute(dropRequest(isSystem), Void.class); + } + + @Override + public CollectionEntity getInfo() { + return executorSync().execute(getInfoRequest(), CollectionEntity.class); + } + + @Override + public CollectionPropertiesEntity getProperties() { + return executorSync().execute(getPropertiesRequest(), CollectionPropertiesEntity.class); + } + + @Override + public CollectionPropertiesEntity changeProperties(final CollectionPropertiesOptions options) { + return executorSync().execute(changePropertiesRequest(options), CollectionPropertiesEntity.class); + } + + @Override + public CollectionEntity rename(final String newName) { + return executorSync().execute(renameRequest(newName), CollectionEntity.class); + } + + @Override + public ShardEntity getResponsibleShard(final Object value) { + return executorSync().execute(responsibleShardRequest(value), ShardEntity.class); + } + + @Override + public CollectionRevisionEntity getRevision() { + return executorSync().execute(getRevisionRequest(), CollectionRevisionEntity.class); + } + + @Override + public void grantAccess(final String user, final Permissions permissions) { + executorSync().execute(grantAccessRequest(user, permissions), Void.class); + } + + @Override + public void revokeAccess(final String user) { + executorSync().execute(grantAccessRequest(user, Permissions.NONE), Void.class); + } + + @Override + public void resetAccess(final String user) { + executorSync().execute(resetAccessRequest(user), Void.class); + } + + @Override + public Permissions getPermissions(final String user) { + return executorSync().execute(getPermissionsRequest(user), getPermissionsResponseDeserialzer()); + } + +} diff --git a/src/main/java/com/arangodb/internal/net/HostHandler.java b/core/src/main/java/com/arangodb/internal/ArangoCursorExecute.java similarity index 78% 
rename from src/main/java/com/arangodb/internal/net/HostHandler.java rename to core/src/main/java/com/arangodb/internal/ArangoCursorExecute.java index 1740f041a..ebdd1fcd2 100644 --- a/src/main/java/com/arangodb/internal/net/HostHandler.java +++ b/core/src/main/java/com/arangodb/internal/ArangoCursorExecute.java @@ -1,37 +1,35 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public interface HostHandler { - - Host get(); - - void success(); - - void fail(); - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.entity.CursorEntity; + + +/** + * @author Mark Vollmary + */ +public interface ArangoCursorExecute { + + CursorEntity next(String id, String nextBatchId); + + void close(String id); + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoDBAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDBAsyncImpl.java new file mode 100644 index 000000000..80486499d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoDBAsyncImpl.java @@ -0,0 +1,202 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.*; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; + +import java.util.Collection; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + * @author Heiko Kernbach + * @author Michele Rastelli + */ +public class ArangoDBAsyncImpl extends InternalArangoDB implements ArangoDBAsync { + + private final ArangoDB arangoDB; + + public ArangoDBAsyncImpl(final ArangoDBImpl arangoDB) { + super(arangoDB); + this.arangoDB = arangoDB; + } + + @Override + public void shutdown() { + arangoDB.shutdown(); + } + + @Override + public void updateJwt(String jwt) { + arangoDB.updateJwt(jwt); + } + + @Override + public ArangoDatabaseAsync db() { + return db(ArangoRequestParam.SYSTEM); + } + + @Override + public ArangoDatabaseAsync db(final String dbName) { + return new ArangoDatabaseAsyncImpl(this, dbName); + } + + @Override + public ArangoMetrics metrics() { + return new ArangoMetricsImpl(executorAsync().getQueueTimeMetrics()); + } + + @Override + public CompletableFuture createDatabase(final String dbName) { + return createDatabase(new DBCreateOptions().name(dbName)); + } + + @Override + public CompletableFuture createDatabase(DBCreateOptions options) { + return executorAsync().execute(() -> createDatabaseRequest(options), createDatabaseResponseDeserializer()); + } + + @Override + public CompletableFuture> getDatabases() { + return executorAsync().execute(() -> getDatabasesRequest(ArangoRequestParam.SYSTEM), getDatabaseResponseDeserializer()); + } + + @Override + public CompletableFuture> getAccessibleDatabases() { + return db().getAccessibleDatabases(); + } + + @Override + public CompletableFuture> getAccessibleDatabasesFor(final String user) { + return executorAsync().execute(() -> getAccessibleDatabasesForRequest(ArangoRequestParam.SYSTEM, user), + 
getAccessibleDatabasesForResponseDeserializer()); + } + + @Override + public CompletableFuture getVersion() { + return db().getVersion(); + } + + @Override + public CompletableFuture getEngine() { + return db().getEngine(); + } + + @Override + public CompletableFuture getRole() { + return executorAsync().execute(this::getRoleRequest, getRoleResponseDeserializer()); + } + + @Override + public CompletableFuture getServerId() { + return executorAsync().execute(this::getServerIdRequest, getServerIdResponseDeserializer()); + } + + @Override + public CompletableFuture createUser(final String user, final String passwd) { + return executorAsync().execute(() -> createUserRequest(ArangoRequestParam.SYSTEM, user, passwd, new UserCreateOptions()), + UserEntity.class); + } + + @Override + public CompletableFuture createUser(final String user, final String passwd, final UserCreateOptions options) { + return executorAsync().execute(() -> createUserRequest(ArangoRequestParam.SYSTEM, user, passwd, options), UserEntity.class); + } + + @Override + public CompletableFuture deleteUser(final String user) { + return executorAsync().execute(() -> deleteUserRequest(ArangoRequestParam.SYSTEM, user), Void.class); + } + + @Override + public CompletableFuture getUser(final String user) { + return executorAsync().execute(() -> getUserRequest(ArangoRequestParam.SYSTEM, user), UserEntity.class); + } + + @Override + public CompletableFuture> getUsers() { + return executorAsync().execute(() -> getUsersRequest(ArangoRequestParam.SYSTEM), getUsersResponseDeserializer()); + } + + @Override + public CompletableFuture updateUser(final String user, final UserUpdateOptions options) { + return executorAsync().execute(() -> updateUserRequest(ArangoRequestParam.SYSTEM, user, options), UserEntity.class); + } + + @Override + public CompletableFuture replaceUser(final String user, final UserUpdateOptions options) { + return executorAsync().execute(() -> replaceUserRequest(ArangoRequestParam.SYSTEM, user, 
options), UserEntity.class); + } + + @Override + public CompletableFuture grantDefaultDatabaseAccess(final String user, final Permissions permissions) { + return executorAsync().execute(() -> updateUserDefaultDatabaseAccessRequest(user, permissions), Void.class); + } + + @Override + public CompletableFuture grantDefaultCollectionAccess(final String user, final Permissions permissions) { + return executorAsync().execute(() -> updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); + } + + @Override + public CompletableFuture> execute(Request request, Class type) { + return executorAsync().execute(() -> executeRequest(request), responseDeserializer(type)); + } + + @Override + public CompletableFuture getLogEntries(final LogOptions options) { + return executorAsync().execute(() -> getLogEntriesRequest(options), LogEntriesEntity.class); + } + + @Override + public CompletableFuture getLogLevel() { + return getLogLevel(new LogLevelOptions()); + } + + @Override + public CompletableFuture getLogLevel(final LogLevelOptions options) { + return executorAsync().execute(() -> getLogLevelRequest(options), LogLevelEntity.class); + } + + @Override + public CompletableFuture setLogLevel(final LogLevelEntity entity) { + return setLogLevel(entity, new LogLevelOptions()); + } + + @Override + public CompletableFuture setLogLevel(final LogLevelEntity entity, final LogLevelOptions options) { + return executorAsync().execute(() -> setLogLevelRequest(entity, options), LogLevelEntity.class); + } + + @Override + public CompletableFuture resetLogLevels(LogLevelOptions options) { + return executorAsync().execute(() -> resetLogLevelsRequest(options), LogLevelEntity.class); + } + + @Override + public CompletableFuture> getQueryOptimizerRules() { + return executorAsync().execute(this::getQueryOptimizerRulesRequest, SerdeUtils.constructListType(QueryOptimizerRule.class)); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoDBImpl.java 
b/core/src/main/java/com/arangodb/internal/ArangoDBImpl.java new file mode 100644 index 000000000..24c4f164d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoDBImpl.java @@ -0,0 +1,216 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.*; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.HostHandler; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; + +/** + * @author Mark Vollmary + * @author Heiko Kernbach + * @author Michele Rastelli + */ +public class ArangoDBImpl extends InternalArangoDB implements ArangoDB { + + private static final Logger LOGGER = LoggerFactory.getLogger(ArangoDBImpl.class); + private final HostHandler hostHandler; + + public ArangoDBImpl(final ArangoConfig config, + final CommunicationProtocol protocol, + final HostHandler hostHandler) { + super(protocol, config); + this.hostHandler = hostHandler; + LOGGER.debug("ArangoDB Client is ready to use"); + } + + @Override + public ArangoDBAsync async() { + return new ArangoDBAsyncImpl(this); + } + + @Override + public void shutdown() { + 
executorSync().disconnect(); + } + + @Override + public void updateJwt(String jwt) { + hostHandler.setJwt(jwt); + executorSync().setJwt(jwt); + } + + @Override + public ArangoDatabase db() { + return db(ArangoRequestParam.SYSTEM); + } + + @Override + public ArangoDatabase db(final String dbName) { + return new ArangoDatabaseImpl(this, dbName); + } + + @Override + public ArangoMetrics metrics() { + return new ArangoMetricsImpl(executorSync().getQueueTimeMetrics()); + } + + @Override + public Boolean createDatabase(final String dbName) { + return createDatabase(new DBCreateOptions().name(dbName)); + } + + @Override + public Boolean createDatabase(DBCreateOptions options) { + return executorSync().execute(createDatabaseRequest(options), createDatabaseResponseDeserializer()); + } + + @Override + public Collection getDatabases() { + return executorSync().execute(getDatabasesRequest(db().name()), getDatabaseResponseDeserializer()); + } + + @Override + public Collection getAccessibleDatabases() { + return db().getAccessibleDatabases(); + } + + @Override + public Collection getAccessibleDatabasesFor(final String user) { + return executorSync().execute(getAccessibleDatabasesForRequest(db().name(), user), + getAccessibleDatabasesForResponseDeserializer()); + } + + @Override + public ArangoDBVersion getVersion() { + return db().getVersion(); + } + + @Override + public ArangoDBEngine getEngine() { + return db().getEngine(); + } + + @Override + public ServerRole getRole() { + return executorSync().execute(getRoleRequest(), getRoleResponseDeserializer()); + } + + @Override + public String getServerId() { + return executorSync().execute(getServerIdRequest(), getServerIdResponseDeserializer()); + } + + @Override + public UserEntity createUser(final String user, final String passwd) { + return executorSync().execute(createUserRequest(db().name(), user, passwd, new UserCreateOptions()), + UserEntity.class); + } + + @Override + public UserEntity createUser(final String user, final 
String passwd, final UserCreateOptions options) { + return executorSync().execute(createUserRequest(db().name(), user, passwd, options), UserEntity.class); + } + + @Override + public void deleteUser(final String user) { + executorSync().execute(deleteUserRequest(db().name(), user), Void.class); + } + + @Override + public UserEntity getUser(final String user) { + return executorSync().execute(getUserRequest(db().name(), user), UserEntity.class); + } + + @Override + public Collection getUsers() { + return executorSync().execute(getUsersRequest(db().name()), getUsersResponseDeserializer()); + } + + @Override + public UserEntity updateUser(final String user, final UserUpdateOptions options) { + return executorSync().execute(updateUserRequest(db().name(), user, options), UserEntity.class); + } + + @Override + public UserEntity replaceUser(final String user, final UserUpdateOptions options) { + return executorSync().execute(replaceUserRequest(db().name(), user, options), UserEntity.class); + } + + @Override + public void grantDefaultDatabaseAccess(final String user, final Permissions permissions) { + executorSync().execute(updateUserDefaultDatabaseAccessRequest(user, permissions), Void.class); + } + + @Override + public void grantDefaultCollectionAccess(final String user, final Permissions permissions) { + executorSync().execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); + } + + @Override + public Response execute(Request request, Class type) { + return executorSync().execute(executeRequest(request), responseDeserializer(type)); + } + + @Override + public LogEntriesEntity getLogEntries(final LogOptions options) { + return executorSync().execute(getLogEntriesRequest(options), LogEntriesEntity.class); + } + + @Override + public LogLevelEntity getLogLevel() { + return getLogLevel(new LogLevelOptions()); + } + + @Override + public LogLevelEntity getLogLevel(final LogLevelOptions options) { + return 
executorSync().execute(getLogLevelRequest(options), LogLevelEntity.class); + } + + @Override + public LogLevelEntity setLogLevel(final LogLevelEntity entity) { + return setLogLevel(entity, new LogLevelOptions()); + } + + @Override + public LogLevelEntity setLogLevel(final LogLevelEntity entity, final LogLevelOptions options) { + return executorSync().execute(setLogLevelRequest(entity, options), LogLevelEntity.class); + } + + @Override + public LogLevelEntity resetLogLevels(LogLevelOptions options) { + return executorSync().execute(resetLogLevelsRequest(options), LogLevelEntity.class); + } + + @Override + public Collection getQueryOptimizerRules() { + return executorSync().execute(getQueryOptimizerRulesRequest(), SerdeUtils.constructListType(QueryOptimizerRule.class)); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java new file mode 100644 index 000000000..7c7dda594 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java @@ -0,0 +1,426 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer; +import com.arangodb.internal.cursor.ArangoCursorAsyncImpl; +import com.arangodb.internal.net.HostHandle; +import com.arangodb.model.*; +import com.arangodb.model.arangosearch.AnalyzerDeleteOptions; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; + +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATABASE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; +import static com.arangodb.internal.serde.SerdeUtils.constructListType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class ArangoDatabaseAsyncImpl extends InternalArangoDatabase implements ArangoDatabaseAsync { + + private final ArangoDBAsync arangoDB; + + protected ArangoDatabaseAsyncImpl(final ArangoDBAsyncImpl arangoDB, final String name) { + super(arangoDB, name); + this.arangoDB = arangoDB; + } + + @Override + public ArangoDBAsync arango() { + return arangoDB; + } + + @Override + public CompletableFuture getVersion() { + return executorAsync().execute(this::getVersionRequest, ArangoDBVersion.class); + } + + @Override + public CompletableFuture getEngine() { + return executorAsync().execute(this::getEngineRequest, ArangoDBEngine.class); + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_ARANGO_DATABASE_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture> getAccessibleDatabases() { + return executorAsync().execute(this::getAccessibleDatabasesRequest, getDatabaseResponseDeserializer()); + } + + @Override + public ArangoCollectionAsync collection(String name) { + return new ArangoCollectionAsyncImpl(this, name); + } + + @Override + public CompletableFuture createCollection(final String name) { + return executorAsync().execute(() -> createCollectionRequest(name, new CollectionCreateOptions()), CollectionEntity.class); + } + + @Override + public CompletableFuture createCollection(final String name, final CollectionCreateOptions options) { + return executorAsync().execute(() -> createCollectionRequest(name, options), CollectionEntity.class); + } + + @Override + public CompletableFuture> getCollections() { + return executorAsync() + .execute(() -> getCollectionsRequest(new CollectionsReadOptions()), getCollectionsResponseDeserializer()); + } + + @Override + public CompletableFuture> getCollections(final CollectionsReadOptions options) { + return executorAsync().execute(() -> getCollectionsRequest(options), getCollectionsResponseDeserializer()); + } + + @Override + public CompletableFuture getIndex(final String id) { + final String[] split = id.split("/"); + return collection(split[0]).getIndex(split[1]); + } + + @Override + public CompletableFuture deleteIndex(final String id) { + final String[] split = id.split("/"); + return collection(split[0]).deleteIndex(split[1]); + } + + @Override + public CompletableFuture create() { + return arango().createDatabase(name()); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(this::dropRequest, createDropResponseDeserializer()); + } + + @Override + public 
CompletableFuture grantAccess(final String user, final Permissions permissions) { + return executorAsync().execute(() -> grantAccessRequest(user, permissions), Void.class); + } + + @Override + public CompletableFuture grantAccess(final String user) { + return executorAsync().execute(() -> grantAccessRequest(user, Permissions.RW), Void.class); + } + + @Override + public CompletableFuture revokeAccess(final String user) { + return executorAsync().execute(() -> grantAccessRequest(user, Permissions.NONE), Void.class); + } + + @Override + public CompletableFuture resetAccess(final String user) { + return executorAsync().execute(() -> resetAccessRequest(user), Void.class); + } + + @Override + public CompletableFuture grantDefaultCollectionAccess(final String user, final Permissions permissions) { + return executorAsync().execute(() -> updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); + } + + @Override + public CompletableFuture getPermissions(final String user) { + return executorAsync().execute(() -> getPermissionsRequest(user), getPermissionsResponseDeserialzer()); + } + + @Override + public CompletableFuture> query( + final String query, final Class type, final Map bindVars, final AqlQueryOptions options) { + final InternalRequest request = queryRequest(query, bindVars, options); + final HostHandle hostHandle = new HostHandle(); + return executorAsync().execute(() -> request, cursorEntityDeserializer(type), hostHandle) + .thenApply(res -> new ArangoCursorAsyncImpl<>(this, res, type, hostHandle, options.getAllowRetry())); + } + + @Override + public CompletableFuture> query(String query, Class type, AqlQueryOptions options) { + return query(query, type, null, options); + } + + @Override + public CompletableFuture> query(String query, Class type, Map bindVars) { + return query(query, type, bindVars, new AqlQueryOptions()); + } + + @Override + public CompletableFuture> query(String query, Class type) { + return query(query, type, null, new 
AqlQueryOptions()); + } + + @Override + public CompletableFuture> cursor(final String cursorId, final Class type) { + return cursor(cursorId, type, null, new AqlQueryOptions()); + } + + @Override + public CompletableFuture> cursor(String cursorId, Class type, AqlQueryOptions options) { + return cursor(cursorId, type, null, options); + } + + @Override + public CompletableFuture> cursor(final String cursorId, final Class type, final String nextBatchId) { + return cursor(cursorId, type, nextBatchId, new AqlQueryOptions()); + } + + @Override + public CompletableFuture> cursor(String cursorId, Class type, String nextBatchId, AqlQueryOptions options) { + options.allowRetry(nextBatchId != null); + HostHandle hostHandle = new HostHandle(); + return executorAsync() + .execute(() -> queryNextRequest(cursorId, options, nextBatchId), + cursorEntityDeserializer(type), + hostHandle) + .thenApply(res -> new ArangoCursorAsyncImpl<>(this, res, type, hostHandle, nextBatchId != null)); + } + + @Override + public CompletableFuture explainQuery( + final String query, final Map bindVars, final AqlQueryExplainOptions options) { + return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); + } + + @Override + public CompletableFuture explainAqlQuery( + String query, Map bindVars, AqlQueryExplainOptions options) { + return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) { + return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public CompletableFuture parseQuery(final String query) { + return executorAsync().execute(() -> parseQueryRequest(query), AqlParseEntity.class); + } + + @Override + public CompletableFuture clearQueryCache() { + return 
executorAsync().execute(this::clearQueryCacheRequest, Void.class); + } + + @Override + public CompletableFuture getQueryCacheProperties() { + return executorAsync().execute(this::getQueryCachePropertiesRequest, QueryCachePropertiesEntity.class); + } + + @Override + public CompletableFuture setQueryCacheProperties(final QueryCachePropertiesEntity properties) { + return executorAsync().execute(() -> setQueryCachePropertiesRequest(properties), QueryCachePropertiesEntity.class); + } + + @Override + public CompletableFuture getQueryTrackingProperties() { + return executorAsync().execute(this::getQueryTrackingPropertiesRequest, QueryTrackingPropertiesEntity.class); + } + + @Override + public CompletableFuture setQueryTrackingProperties(final QueryTrackingPropertiesEntity properties) { + return executorAsync().execute(() -> setQueryTrackingPropertiesRequest(properties), QueryTrackingPropertiesEntity.class); + } + + @Override + public CompletableFuture> getCurrentlyRunningQueries() { + return executorAsync().execute(this::getCurrentlyRunningQueriesRequest, + constructListType(QueryEntity.class)); + } + + @Override + public CompletableFuture> getSlowQueries() { + return executorAsync().execute(this::getSlowQueriesRequest, + constructListType(QueryEntity.class)); + } + + @Override + public CompletableFuture clearSlowQueries() { + return executorAsync().execute(this::clearSlowQueriesRequest, Void.class); + } + + @Override + public CompletableFuture killQuery(final String id) { + return executorAsync().execute(() -> killQueryRequest(id), Void.class); + } + + @Override + public CompletableFuture createAqlFunction( + final String name, final String code, final AqlFunctionCreateOptions options) { + return executorAsync().execute(() -> createAqlFunctionRequest(name, code, options), Void.class); + } + + @Override + public CompletableFuture deleteAqlFunction(final String name, final AqlFunctionDeleteOptions options) { + return executorAsync().execute(() -> 
deleteAqlFunctionRequest(name, options), deleteAqlFunctionResponseDeserializer()); + } + + @Override + public CompletableFuture> getAqlFunctions(final AqlFunctionGetOptions options) { + return executorAsync().execute(() -> getAqlFunctionsRequest(options), getAqlFunctionsResponseDeserializer()); + } + + @Override + public ArangoGraphAsync graph(final String name) { + return new ArangoGraphAsyncImpl(this, name); + } + + @Override + public CompletableFuture createGraph(final String name, final Iterable edgeDefinitions) { + return createGraph(name, edgeDefinitions, new GraphCreateOptions()); + } + + @Override + public CompletableFuture createGraph( + final String name, final Iterable edgeDefinitions, final GraphCreateOptions options) { + return executorAsync().execute(() -> createGraphRequest(name, edgeDefinitions, options), createGraphResponseDeserializer()); + } + + @Override + public CompletableFuture> getGraphs() { + return executorAsync().execute(this::getGraphsRequest, getGraphsResponseDeserializer()); + } + + @Override + public CompletableFuture transaction(final String action, final Class type, final TransactionOptions options) { + return executorAsync().execute(() -> transactionRequest(action, options), transactionResponseDeserializer(type)); + } + + @Override + public CompletableFuture beginStreamTransaction(StreamTransactionOptions options) { + return executorAsync().execute(() -> beginStreamTransactionRequest(options), streamTransactionResponseDeserializer()); + } + + @Override + public CompletableFuture abortStreamTransaction(String id) { + return executorAsync().execute(() -> abortStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public CompletableFuture getStreamTransaction(String id) { + return executorAsync().execute(() -> getStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public CompletableFuture> getStreamTransactions() { + return 
executorAsync().execute(this::getStreamTransactionsRequest, transactionsResponseDeserializer()); + } + + @Override + public CompletableFuture commitStreamTransaction(String id) { + return executorAsync().execute(() -> commitStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, getInfoResponseDeserializer()); + } + + @Override + public CompletableFuture reloadRouting() { + return executorAsync().execute(this::reloadRoutingRequest, Void.class); + } + + @Override + public CompletableFuture> getViews() { + return executorAsync().execute(this::getViewsRequest, getViewsResponseDeserializer()); + } + + @Override + public ArangoViewAsync view(final String name) { + return new ArangoViewAsyncImpl(this, name); + } + + @Override + public ArangoSearchAsync arangoSearch(final String name) { + return new ArangoSearchAsyncImpl(this, name); + } + + @Override + public SearchAliasAsync searchAlias(String name) { + return new SearchAliasAsyncImpl(this, name); + } + + @Override + public CompletableFuture createView(final String name, final ViewType type) { + return executorAsync().execute(() -> createViewRequest(name, type), ViewEntity.class); + } + + @Override + public CompletableFuture createArangoSearch(final String name, final ArangoSearchCreateOptions options) { + return executorAsync().execute(() -> createArangoSearchRequest(name, options), ViewEntity.class); + } + + @Override + public CompletableFuture createSearchAlias(String name, SearchAliasCreateOptions options) { + return executorAsync().execute(() -> createSearchAliasRequest(name, options), ViewEntity.class); + } + + @Override + public CompletableFuture createSearchAnalyzer(SearchAnalyzer analyzer) { + return executorAsync().execute(() -> createAnalyzerRequest(analyzer), SearchAnalyzer.class); + } + + @Override + public CompletableFuture getSearchAnalyzer(String name) { + return 
executorAsync().execute(() -> getAnalyzerRequest(name), SearchAnalyzer.class); + } + + @Override + public CompletableFuture> getSearchAnalyzers() { + return executorAsync().execute(this::getAnalyzersRequest, getSearchAnalyzersResponseDeserializer()); + } + + @Override + public CompletableFuture deleteSearchAnalyzer(String name) { + return deleteSearchAnalyzer(name, null); + } + + @Override + public CompletableFuture deleteSearchAnalyzer(String name, AnalyzerDeleteOptions options) { + return executorAsync().execute(() -> deleteAnalyzerRequest(name, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java new file mode 100644 index 000000000..26649883e --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java @@ -0,0 +1,446 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer; +import com.arangodb.internal.cursor.ArangoCursorImpl; +import com.arangodb.internal.net.HostHandle; +import com.arangodb.model.*; +import com.arangodb.model.arangosearch.AnalyzerDeleteOptions; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; + +import java.util.Collection; +import java.util.Map; + +import static com.arangodb.internal.ArangoErrors.*; +import static com.arangodb.internal.serde.SerdeUtils.constructListType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class ArangoDatabaseImpl extends InternalArangoDatabase implements ArangoDatabase { + + private final ArangoDB arangoDB; + + protected ArangoDatabaseImpl(final ArangoDBImpl arangoDB, final String name) { + super(arangoDB, name); + this.arangoDB = arangoDB; + } + + @Override + public ArangoDB arango() { + return arangoDB; + } + + @Override + public ArangoDBVersion getVersion() { + return executorSync().execute(getVersionRequest(), ArangoDBVersion.class); + } + + @Override + public ArangoDBEngine getEngine() { + return executorSync().execute(getEngineRequest(), ArangoDBEngine.class); + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_ARANGO_DATABASE_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public Collection getAccessibleDatabases() { + return executorSync().execute(getAccessibleDatabasesRequest(), getDatabaseResponseDeserializer()); + } + + @Override + public ArangoCollection collection(final String name) { + return new ArangoCollectionImpl(this, name); + } + + @Override + public CollectionEntity createCollection(final String name) { + return 
executorSync().execute(createCollectionRequest(name, new CollectionCreateOptions()), CollectionEntity.class); + } + + @Override + public CollectionEntity createCollection(final String name, final CollectionCreateOptions options) { + return executorSync().execute(createCollectionRequest(name, options), CollectionEntity.class); + } + + @Override + public Collection getCollections() { + return executorSync() + .execute(getCollectionsRequest(new CollectionsReadOptions()), getCollectionsResponseDeserializer()); + } + + @Override + public Collection getCollections(final CollectionsReadOptions options) { + return executorSync().execute(getCollectionsRequest(options), getCollectionsResponseDeserializer()); + } + + @Override + public IndexEntity getIndex(final String id) { + final String[] split = id.split("/"); + return collection(split[0]).getIndex(split[1]); + } + + @Override + public String deleteIndex(final String id) { + final String[] split = id.split("/"); + return collection(split[0]).deleteIndex(split[1]); + } + + @Override + public Boolean create() { + return arango().createDatabase(name()); + } + + @Override + public Boolean drop() { + return executorSync().execute(dropRequest(), createDropResponseDeserializer()); + } + + @Override + public void grantAccess(final String user, final Permissions permissions) { + executorSync().execute(grantAccessRequest(user, permissions), Void.class); + } + + @Override + public void grantAccess(final String user) { + executorSync().execute(grantAccessRequest(user, Permissions.RW), Void.class); + } + + @Override + public void revokeAccess(final String user) { + executorSync().execute(grantAccessRequest(user, Permissions.NONE), Void.class); + } + + @Override + public void resetAccess(final String user) { + executorSync().execute(resetAccessRequest(user), Void.class); + } + + @Override + public void grantDefaultCollectionAccess(final String user, final Permissions permissions) { + 
executorSync().execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); + } + + @Override + public Permissions getPermissions(final String user) { + return executorSync().execute(getPermissionsRequest(user), getPermissionsResponseDeserialzer()); + } + + @Override + public ArangoCursor query( + final String query, final Class type, final Map bindVars, final AqlQueryOptions options) { + final InternalRequest request = queryRequest(query, bindVars, options); + final HostHandle hostHandle = new HostHandle(); + final CursorEntity result = executorSync().execute(request, cursorEntityDeserializer(type), hostHandle); + return createCursor(result, type, options, hostHandle); + } + + @Override + public ArangoCursor query(final String query, final Class type, final Map bindVars) { + return query(query, type, bindVars, new AqlQueryOptions()); + } + + @Override + public ArangoCursor query(final String query, final Class type, final AqlQueryOptions options) { + return query(query, type, null, options); + } + + @Override + public ArangoCursor query(final String query, final Class type) { + return query(query, type, null, new AqlQueryOptions()); + } + + @Override + public ArangoCursor cursor(final String cursorId, final Class type) { + return cursor(cursorId, type, null, new AqlQueryOptions()); + } + + @Override + public ArangoCursor cursor(final String cursorId, final Class type, final AqlQueryOptions options) { + return cursor(cursorId, type, null, options); + } + + @Override + public ArangoCursor cursor(final String cursorId, final Class type, final String nextBatchId) { + return cursor(cursorId, type, nextBatchId, new AqlQueryOptions()); + } + + @Override + public ArangoCursor cursor(final String cursorId, final Class type, final String nextBatchId, final AqlQueryOptions options) { + options.allowRetry(nextBatchId != null); + HostHandle hostHandle = new HostHandle(); + CursorEntity result = executorSync().execute( + queryNextRequest(cursorId, options, 
nextBatchId), + cursorEntityDeserializer(type), + hostHandle); + return createCursor(result, type, options, hostHandle); + } + + private ArangoCursor createCursor( + final CursorEntity result, + final Class type, + final AqlQueryOptions opts, + final HostHandle hostHandle) { + AqlQueryOptions options = opts != null ? opts : new AqlQueryOptions(); + + final ArangoCursorExecute execute = new ArangoCursorExecute() { + @Override + public CursorEntity next(final String id, final String nextBatchId) { + return executorSync().execute(queryNextRequest(id, options, nextBatchId), cursorEntityDeserializer(type), hostHandle); + } + + @Override + public void close(final String id) { + try { + executorSync().execute(queryCloseRequest(id, options), Void.class, hostHandle); + } catch (final ArangoDBException e) { + // ignore errors Response: 404, Error: 1600 - cursor not found + if (!matches(e, 404, 1600)) { + throw e; + } + } + } + }; + return new ArangoCursorImpl<>(execute, type, result, options.getAllowRetry()); + } + + @Override + public AqlExecutionExplainEntity explainQuery( + final String query, final Map bindVars, final AqlQueryExplainOptions options) { + return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); + } + + @Override + public AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options) { + return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) { + return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public AqlParseEntity parseQuery(final String query) { + return executorSync().execute(parseQueryRequest(query), AqlParseEntity.class); + } + + @Override + public void clearQueryCache() { + 
executorSync().execute(clearQueryCacheRequest(), Void.class); + } + + @Override + public QueryCachePropertiesEntity getQueryCacheProperties() { + return executorSync().execute(getQueryCachePropertiesRequest(), QueryCachePropertiesEntity.class); + } + + @Override + public QueryCachePropertiesEntity setQueryCacheProperties(final QueryCachePropertiesEntity properties) { + return executorSync().execute(setQueryCachePropertiesRequest(properties), QueryCachePropertiesEntity.class); + } + + @Override + public QueryTrackingPropertiesEntity getQueryTrackingProperties() { + return executorSync().execute(getQueryTrackingPropertiesRequest(), QueryTrackingPropertiesEntity.class); + } + + @Override + public QueryTrackingPropertiesEntity setQueryTrackingProperties(final QueryTrackingPropertiesEntity properties) { + return executorSync().execute(setQueryTrackingPropertiesRequest(properties), QueryTrackingPropertiesEntity.class); + } + + @Override + public Collection getCurrentlyRunningQueries() { + return executorSync().execute(getCurrentlyRunningQueriesRequest(), + constructListType(QueryEntity.class)); + } + + @Override + public Collection getSlowQueries() { + return executorSync().execute(getSlowQueriesRequest(), + constructListType(QueryEntity.class)); + } + + @Override + public void clearSlowQueries() { + executorSync().execute(clearSlowQueriesRequest(), Void.class); + } + + @Override + public void killQuery(final String id) { + executorSync().execute(killQueryRequest(id), Void.class); + } + + @Override + public void createAqlFunction( + final String name, final String code, final AqlFunctionCreateOptions options) { + executorSync().execute(createAqlFunctionRequest(name, code, options), Void.class); + } + + @Override + public Integer deleteAqlFunction(final String name, final AqlFunctionDeleteOptions options) { + return executorSync().execute(deleteAqlFunctionRequest(name, options), deleteAqlFunctionResponseDeserializer()); + } + + @Override + public Collection 
getAqlFunctions(final AqlFunctionGetOptions options) { + return executorSync().execute(getAqlFunctionsRequest(options), getAqlFunctionsResponseDeserializer()); + } + + @Override + public ArangoGraph graph(final String name) { + return new ArangoGraphImpl(this, name); + } + + @Override + public GraphEntity createGraph(final String name, final Iterable edgeDefinitions) { + return createGraph(name, edgeDefinitions, new GraphCreateOptions()); + } + + @Override + public GraphEntity createGraph( + final String name, final Iterable edgeDefinitions, final GraphCreateOptions options) { + return executorSync().execute(createGraphRequest(name, edgeDefinitions, options), createGraphResponseDeserializer()); + } + + @Override + public Collection getGraphs() { + return executorSync().execute(getGraphsRequest(), getGraphsResponseDeserializer()); + } + + @Override + public T transaction(final String action, final Class type, final TransactionOptions options) { + return executorSync().execute(transactionRequest(action, options), transactionResponseDeserializer(type)); + } + + @Override + public StreamTransactionEntity beginStreamTransaction(StreamTransactionOptions options) { + return executorSync().execute(beginStreamTransactionRequest(options), streamTransactionResponseDeserializer()); + } + + @Override + public StreamTransactionEntity abortStreamTransaction(String id) { + return executorSync().execute(abortStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public StreamTransactionEntity getStreamTransaction(String id) { + return executorSync().execute(getStreamTransactionRequest(id), streamTransactionResponseDeserializer()); + } + + @Override + public Collection getStreamTransactions() { + return executorSync().execute(getStreamTransactionsRequest(), transactionsResponseDeserializer()); + } + + @Override + public StreamTransactionEntity commitStreamTransaction(String id) { + return executorSync().execute(commitStreamTransactionRequest(id), 
streamTransactionResponseDeserializer()); + } + + @Override + public DatabaseEntity getInfo() { + return executorSync().execute(getInfoRequest(), getInfoResponseDeserializer()); + } + + @Override + public void reloadRouting() { + executorSync().execute(reloadRoutingRequest(), Void.class); + } + + @Override + public Collection getViews() { + return executorSync().execute(getViewsRequest(), getViewsResponseDeserializer()); + } + + @Override + public ArangoView view(final String name) { + return new ArangoViewImpl(this, name); + } + + @Override + public ArangoSearch arangoSearch(final String name) { + return new ArangoSearchImpl(this, name); + } + + @Override + public SearchAlias searchAlias(String name) { + return new SearchAliasImpl(this, name); + } + + @Override + public ViewEntity createView(final String name, final ViewType type) { + return executorSync().execute(createViewRequest(name, type), ViewEntity.class); + } + + @Override + public ViewEntity createArangoSearch(final String name, final ArangoSearchCreateOptions options) { + return executorSync().execute(createArangoSearchRequest(name, options), ViewEntity.class); + } + + @Override + public ViewEntity createSearchAlias(String name, SearchAliasCreateOptions options) { + return executorSync().execute(createSearchAliasRequest(name, options), ViewEntity.class); + } + + @Override + public SearchAnalyzer createSearchAnalyzer(SearchAnalyzer analyzer) { + return executorSync().execute(createAnalyzerRequest(analyzer), SearchAnalyzer.class); + } + + @Override + public SearchAnalyzer getSearchAnalyzer(String name) { + return executorSync().execute(getAnalyzerRequest(name), SearchAnalyzer.class); + } + + @Override + public Collection getSearchAnalyzers() { + return executorSync().execute(getAnalyzersRequest(), getSearchAnalyzersResponseDeserializer()); + } + + @Override + public void deleteSearchAnalyzer(String name) { + deleteSearchAnalyzer(name, null); + } + + @Override + public void deleteSearchAnalyzer(String name, 
AnalyzerDeleteOptions options) { + executorSync().execute(deleteAnalyzerRequest(name, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoDefaults.java b/core/src/main/java/com/arangodb/internal/ArangoDefaults.java new file mode 100644 index 000000000..2f68fd53e --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoDefaults.java @@ -0,0 +1,70 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.Compression; +import com.arangodb.Protocol; +import com.arangodb.config.HostDescription; +import com.arangodb.entity.LoadBalancingStrategy; + +import java.util.Collections; +import java.util.List; + +/** + * @author Mark Vollmary + */ +public final class ArangoDefaults { + + public static final int INTEGER_BYTES = Integer.SIZE / Byte.SIZE; + public static final int LONG_BYTES = Long.SIZE / Byte.SIZE; + public static final int CHUNK_MIN_HEADER_SIZE = INTEGER_BYTES + INTEGER_BYTES + LONG_BYTES; + public static final int CHUNK_MAX_HEADER_SIZE = CHUNK_MIN_HEADER_SIZE + LONG_BYTES; + public static final int MAX_CONNECTIONS_VST_DEFAULT = 1; + public static final int MAX_CONNECTIONS_HTTP_DEFAULT = 20; + public static final int MAX_CONNECTIONS_HTTP2_DEFAULT = 1; + + // default config properties + public static final List DEFAULT_HOSTS = Collections.emptyList(); + public static final Protocol DEFAULT_PROTOCOL = Protocol.HTTP2_JSON; + public static final String DEFAULT_USER = "root"; + public static final Integer DEFAULT_TIMEOUT = 0; + public static final Long DEFAULT_CONNECTION_TTL_HTTP = 30_000L; + public static final Boolean DEFAULT_USE_SSL = false; + public static final String DEFAULT_SSL_PROTOCOL = "TLS"; + public static final Boolean DEFAULT_VERIFY_HOST = true; + public static final Integer DEFAULT_CHUNK_SIZE = 30_000; + public static final Boolean DEFAULT_PIPELINING = false; + public static final Boolean DEFAULT_ACQUIRE_HOST_LIST = false; + public static final Integer DEFAULT_ACQUIRE_HOST_LIST_INTERVAL = 60 * 60 * 1000; // hour + public static final LoadBalancingStrategy DEFAULT_LOAD_BALANCING_STRATEGY = LoadBalancingStrategy.NONE; + public static final Integer DEFAULT_RESPONSE_QUEUE_TIME_SAMPLES = 10; + + // region compression + public static final Compression DEFAULT_COMPRESSION = Compression.NONE; + public static final Integer 
DEFAULT_COMPRESSION_THRESHOLD = 1024; + public static final Integer DEFAULT_COMPRESSION_LEVEL = 6; + // endregion + + private ArangoDefaults() { + super(); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionAsyncImpl.java new file mode 100644 index 000000000..f6348df26 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionAsyncImpl.java @@ -0,0 +1,140 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoEdgeCollectionAsync; +import com.arangodb.ArangoGraphAsync; +import com.arangodb.entity.EdgeEntity; +import com.arangodb.entity.EdgeUpdateEntity; +import com.arangodb.model.*; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.*; + +/** + * @author Mark Vollmary + */ +public class ArangoEdgeCollectionAsyncImpl extends InternalArangoEdgeCollection implements ArangoEdgeCollectionAsync { + + private final ArangoGraphAsync graph; + + protected ArangoEdgeCollectionAsyncImpl(final ArangoGraphAsyncImpl graph, final String name) { + super(graph, graph.db().name(), graph.name(), name); + this.graph = graph; + } + + @Override + public ArangoGraphAsync graph() { + return graph; + } + + @Deprecated + @Override + public CompletableFuture drop() { + return drop(new EdgeCollectionDropOptions()); + } + + @Deprecated + @Override + public CompletableFuture drop(final EdgeCollectionDropOptions options) { + return executorAsync().execute(() -> removeEdgeDefinitionRequest(options), Void.class); + } + + @Override + public CompletableFuture remove() { + return remove(new EdgeCollectionRemoveOptions()); + } + + @Override + public CompletableFuture remove(final EdgeCollectionRemoveOptions options) { + return executorAsync().execute(() -> removeEdgeDefinitionRequest(options), Void.class); + } + + @Override + public CompletableFuture insertEdge(final Object value) { + return executorAsync().execute(() -> insertEdgeRequest(value, new EdgeCreateOptions()), + insertEdgeResponseDeserializer()); + } + + @Override + public CompletableFuture insertEdge(final Object value, final EdgeCreateOptions options) { + return executorAsync().execute(() -> insertEdgeRequest(value, options), insertEdgeResponseDeserializer()); + } + + @Override + 
public CompletableFuture getEdge(final String key, final Class type) { + return getEdge(key, type, null); + } + + @Override + public CompletableFuture getEdge(final String key, final Class type, final GraphDocumentReadOptions options) { + return executorAsync().execute(() -> getEdgeRequest(key, options), getEdgeResponseDeserializer(type)) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 304) + || matches(aEx, 404, ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(aEx, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture replaceEdge(final String key, final Object value) { + return executorAsync().execute(() -> replaceEdgeRequest(key, value, new EdgeReplaceOptions()), + replaceEdgeResponseDeserializer()); + } + + @Override + public CompletableFuture replaceEdge(final String key, final Object value, final EdgeReplaceOptions options) { + return executorAsync().execute(() -> replaceEdgeRequest(key, value, options), replaceEdgeResponseDeserializer()); + } + + @Override + public CompletableFuture updateEdge(final String key, final Object value) { + return executorAsync().execute(() -> updateEdgeRequest(key, value, new EdgeUpdateOptions()), + updateEdgeResponseDeserializer()); + } + + @Override + public CompletableFuture updateEdge(final String key, final Object value, final EdgeUpdateOptions options) { + return executorAsync().execute(() -> updateEdgeRequest(key, value, options), updateEdgeResponseDeserializer()); + } + + @Override + public CompletableFuture deleteEdge(final String key) { + return executorAsync().execute(() -> deleteEdgeRequest(key, new EdgeDeleteOptions()), Void.class); + } + + @Override + public CompletableFuture deleteEdge(final String key, final EdgeDeleteOptions options) { + return executorAsync().execute(() -> 
deleteEdgeRequest(key, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionImpl.java b/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionImpl.java new file mode 100644 index 000000000..12cc9dce6 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoEdgeCollectionImpl.java @@ -0,0 +1,134 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoEdgeCollection; +import com.arangodb.ArangoGraph; +import com.arangodb.entity.EdgeEntity; +import com.arangodb.entity.EdgeUpdateEntity; +import com.arangodb.model.*; + +import static com.arangodb.internal.ArangoErrors.*; + +/** + * @author Mark Vollmary + */ +public class ArangoEdgeCollectionImpl extends InternalArangoEdgeCollection implements ArangoEdgeCollection { + + private final ArangoGraphImpl graph; + + protected ArangoEdgeCollectionImpl(final ArangoGraphImpl graph, final String name) { + super(graph, graph.db().name(), graph.name(), name); + this.graph = graph; + } + + @Override + public ArangoGraph graph() { + return graph; + } + + @Deprecated + @Override + public void drop() { + drop(new EdgeCollectionDropOptions()); + } + + @Deprecated + @Override + public void drop(final EdgeCollectionDropOptions options) { + executorSync().execute(removeEdgeDefinitionRequest(options), Void.class); + } + + @Override + public void remove() { + remove(new EdgeCollectionRemoveOptions()); + } + + @Override + public void remove(final EdgeCollectionRemoveOptions options) { + executorSync().execute(removeEdgeDefinitionRequest(options), Void.class); + } + + @Override + public EdgeEntity insertEdge(final Object value) { + return executorSync().execute(insertEdgeRequest(value, new EdgeCreateOptions()), + insertEdgeResponseDeserializer()); + } + + @Override + public EdgeEntity insertEdge(final Object value, final EdgeCreateOptions options) { + return executorSync().execute(insertEdgeRequest(value, options), insertEdgeResponseDeserializer()); + } + + @Override + public T getEdge(final String key, final Class type) { + return getEdge(key, type, null); + } + + @Override + public T getEdge(final String key, final Class type, final GraphDocumentReadOptions options) { + try { + return 
executorSync().execute(getEdgeRequest(key, options), getEdgeResponseDeserializer(type)); + } catch (final ArangoDBException e) { + if (matches(e, 304) + || matches(e, 404, ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(e, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + throw e; + } + } + + @Override + public EdgeUpdateEntity replaceEdge(final String key, final Object value) { + return executorSync().execute(replaceEdgeRequest(key, value, new EdgeReplaceOptions()), + replaceEdgeResponseDeserializer()); + } + + @Override + public EdgeUpdateEntity replaceEdge(final String key, final Object value, final EdgeReplaceOptions options) { + return executorSync().execute(replaceEdgeRequest(key, value, options), replaceEdgeResponseDeserializer()); + } + + @Override + public EdgeUpdateEntity updateEdge(final String key, final Object value) { + return executorSync().execute(updateEdgeRequest(key, value, new EdgeUpdateOptions()), + updateEdgeResponseDeserializer()); + } + + @Override + public EdgeUpdateEntity updateEdge(final String key, final Object value, final EdgeUpdateOptions options) { + return executorSync().execute(updateEdgeRequest(key, value, options), updateEdgeResponseDeserializer()); + } + + @Override + public void deleteEdge(final String key) { + executorSync().execute(deleteEdgeRequest(key, new EdgeDeleteOptions()), Void.class); + } + + @Override + public void deleteEdge(final String key, final EdgeDeleteOptions options) { + executorSync().execute(deleteEdgeRequest(key, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoErrors.java b/core/src/main/java/com/arangodb/internal/ArangoErrors.java new file mode 100644 index 000000000..674ba2f2f --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoErrors.java @@ -0,0 +1,50 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; + +/** + * @author Mark Vollmary + */ +public final class ArangoErrors { + + public static final Integer ERROR_ARANGO_CONFLICT = 1200; + public static final Integer ERROR_ARANGO_DOCUMENT_NOT_FOUND = 1202; + public static final Integer ERROR_ARANGO_DATA_SOURCE_NOT_FOUND = 1203; + public static final Integer ERROR_ARANGO_DATABASE_NOT_FOUND = 1228; + public static final Integer ERROR_GRAPH_NOT_FOUND = 1924; + public static final Integer QUEUE_TIME_VIOLATED = 21004; + + public static boolean matches(ArangoDBException e, int responseCode, int errorNum) { + return matches(e, responseCode) + && e.getErrorNum() != null && e.getErrorNum() == errorNum; + } + + public static boolean matches(ArangoDBException e, int responseCode) { + return e.getResponseCode() != null && e.getResponseCode() == responseCode; + } + + private ArangoErrors() { + super(); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoExecuteable.java b/core/src/main/java/com/arangodb/internal/ArangoExecuteable.java new file mode 100644 index 000000000..db67992e2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoExecuteable.java @@ -0,0 +1,92 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoSerdeAccessor; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.util.EncodeUtils; + +/** + * @author Mark Vollmary + */ +public abstract class ArangoExecuteable implements ArangoSerdeAccessor { + + private static final String SLASH = "/"; + + private final ArangoExecutorSync executorSync; + private final ArangoExecutorAsync executorAsync; + private final InternalSerde serde; + + protected ArangoExecuteable(final CommunicationProtocol protocol, final ArangoConfig config) { + this(new ArangoExecutorSync(protocol, config), new ArangoExecutorAsync(protocol, config), config.getInternalSerde()); + } + + protected ArangoExecuteable(final ArangoExecuteable other) { + this(other.executorSync, other.executorAsync, other.serde); + } + + private ArangoExecuteable(final ArangoExecutorSync executorSync, + final ArangoExecutorAsync executorAsync, + final InternalSerde serde) { + this.executorSync = executorSync; + this.executorAsync = executorAsync; + this.serde = serde; + } + + + protected static String createPath(final String... 
params) { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < params.length; i++) { + if (params[i] == null) continue; + if (i > 0) { + sb.append(SLASH); + } + final String param; + if (params[i].contains(SLASH)) { + param = createPath(params[i].split(SLASH)); + } else { + param = EncodeUtils.encodeURIComponent(params[i]); + } + sb.append(param); + } + return sb.toString(); + } + + protected ArangoExecutorSync executorSync() { + return executorSync; + } + + protected ArangoExecutorAsync executorAsync() { + return executorAsync; + } + + @Override + public InternalSerde getSerde() { + return serde; + } + + protected InternalRequest request(final String dbName, final RequestType requestType, final String... path) { + return new InternalRequest(dbName, requestType, createPath(path)); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoExecutor.java b/core/src/main/java/com/arangodb/internal/ArangoExecutor.java new file mode 100644 index 000000000..3f491f701 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoExecutor.java @@ -0,0 +1,84 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.QueueTimeMetrics; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.serde.InternalSerde; + +import java.io.IOException; +import java.lang.reflect.Type; + +/** + * @author Mark Vollmary + */ +public abstract class ArangoExecutor { + + protected final CommunicationProtocol protocol; + private final QueueTimeMetricsImpl qtMetrics; + private final InternalSerde serde; + private final String timeoutS; + + protected ArangoExecutor(final CommunicationProtocol protocol, final ArangoConfig config ) { + this.protocol = protocol; + qtMetrics = new QueueTimeMetricsImpl(config.getResponseQueueTimeSamples()); + serde = config.getInternalSerde(); + timeoutS = config.getTimeout() >= 1000 ? Integer.toString(config.getTimeout() / 1000) : null; + } + + public void disconnect() { + try { + protocol.close(); + } catch (final IOException e) { + throw ArangoDBException.of(e); + } + } + + public void setJwt(String jwt) { + protocol.setJwt(jwt); + } + + protected T createResult(final Type type, final InternalResponse response) { + return serde.deserialize(response.getBody(), type); + } + + protected final void interceptResponse(InternalResponse response) { + String queueTime = response.getMeta("X-Arango-Queue-Time-Seconds"); + if (queueTime != null) { + qtMetrics.add(Double.parseDouble(queueTime)); + } + } + + protected final InternalRequest interceptRequest(InternalRequest request) { + request.putHeaderParam("x-arango-queue-time-seconds", timeoutS); + return request; + } + + public QueueTimeMetrics getQueueTimeMetrics() { + return qtMetrics; + } + + public interface ResponseDeserializer { + T deserialize(InternalResponse response); + } +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoExecutorAsync.java 
b/core/src/main/java/com/arangodb/internal/ArangoExecutorAsync.java new file mode 100644 index 000000000..cb1f1c2f3 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoExecutorAsync.java @@ -0,0 +1,97 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.HostHandle; +import com.arangodb.RequestContext; + +import java.lang.reflect.Type; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public class ArangoExecutorAsync extends ArangoExecutor { + private final Executor downstreamExecutor; + + public ArangoExecutorAsync(final CommunicationProtocol protocol, final ArangoConfig config) { + super(protocol, config); + downstreamExecutor = config.getAsyncExecutor(); + } + + public CompletableFuture execute(final Supplier requestSupplier, final Type type) { + return execute(requestSupplier, type, null); + } + + public CompletableFuture execute(final Supplier requestSupplier, final Type type, final HostHandle hostHandle) { + 
return execute(requestSupplier, (response) -> createResult(type, response), hostHandle); + } + + public CompletableFuture execute(final Supplier requestSupplier, final ResponseDeserializer responseDeserializer) { + return execute(requestSupplier, responseDeserializer, null); + } + + public CompletableFuture execute( + final Supplier requestSupplier, + final ResponseDeserializer responseDeserializer, + final HostHandle hostHandle) { + + CompletableFuture cf = CompletableFuture.completedFuture(requestSupplier) + .thenApply(Supplier::get) + .thenCompose(request -> protocol + .executeAsync(interceptRequest(request), hostHandle) + .thenApply(resp -> new ResponseWithRequest(resp, new RequestContextImpl(request))) + ) + .handle((r, e) -> { + if (e != null) { + throw ArangoDBException.of(e); + } else { + interceptResponse(r.response); + return RequestContextHolder.INSTANCE.runWithCtx(r.context, () -> + responseDeserializer.deserialize(r.response)); + } + }); + + if (downstreamExecutor != null) { + return cf.thenApplyAsync(Function.identity(), downstreamExecutor); + } else { + return cf; + } + } + + private static class ResponseWithRequest { + final InternalResponse response; + final RequestContext context; + + ResponseWithRequest(InternalResponse response, RequestContext context) { + this.response = response; + this.context = context; + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoExecutorSync.java b/core/src/main/java/com/arangodb/internal/ArangoExecutorSync.java new file mode 100644 index 000000000..dfd9f986c --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoExecutorSync.java @@ -0,0 +1,61 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.HostHandle; + +import java.lang.reflect.Type; + +/** + * @author Mark Vollmary + */ +public class ArangoExecutorSync extends ArangoExecutor { + + public ArangoExecutorSync(final CommunicationProtocol protocol, final ArangoConfig config) { + super(protocol, config); + } + + public T execute(final InternalRequest request, final Type type) { + return execute(request, type, null); + } + + public T execute(final InternalRequest request, final Type type, final HostHandle hostHandle) { + return execute(request, (response) -> createResult(type, response), hostHandle); + } + + public T execute(final InternalRequest request, final ResponseDeserializer responseDeserializer) { + return execute(request, responseDeserializer, null); + } + + public T execute( + final InternalRequest request, + final ResponseDeserializer responseDeserializer, + final HostHandle hostHandle) { + + final InternalResponse response = protocol.execute(interceptRequest(request), hostHandle); + interceptResponse(response); + return RequestContextHolder.INSTANCE.runWithCtx(new RequestContextImpl(request), () -> + responseDeserializer.deserialize(response)); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoGraphAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoGraphAsyncImpl.java new file mode 100644 index 
000000000..89aeb3a68 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoGraphAsyncImpl.java @@ -0,0 +1,138 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.model.GraphCreateOptions; +import com.arangodb.model.ReplaceEdgeDefinitionOptions; +import com.arangodb.model.VertexCollectionCreateOptions; + +import java.util.Collection; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.ERROR_GRAPH_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +public class ArangoGraphAsyncImpl extends InternalArangoGraph implements ArangoGraphAsync { + + private final ArangoDatabaseAsync db; + + protected ArangoGraphAsyncImpl(final ArangoDatabaseAsyncImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabaseAsync db() { + return db; + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_GRAPH_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture create(final Iterable edgeDefinitions) { + return db().createGraph(name(), edgeDefinitions); + } + + @Override + public CompletableFuture create(final Iterable edgeDefinitions, final GraphCreateOptions options) { + return db().createGraph(name(), edgeDefinitions, options); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(this::dropRequest, Void.class); + } + + @Override + public CompletableFuture drop(final boolean dropCollections) { + return executorAsync().execute(() -> dropRequest(dropCollections), Void.class); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, getInfoResponseDeserializer()); + } + + @Override + public CompletableFuture> getVertexCollections() { + return executorAsync().execute(this::getVertexCollectionsRequest, getVertexCollectionsResponseDeserializer()); + } + + @Override + public CompletableFuture addVertexCollection(final String name) { + return addVertexCollection(name, new VertexCollectionCreateOptions()); + } + + @Override + public CompletableFuture addVertexCollection(final String name, final VertexCollectionCreateOptions options) { + return executorAsync().execute(() -> addVertexCollectionRequest(name, options), addVertexCollectionResponseDeserializer()); + } + + @Override + public ArangoVertexCollectionAsync vertexCollection(final String name) { + return new ArangoVertexCollectionAsyncImpl(this, name); + } + + @Override + public ArangoEdgeCollectionAsync edgeCollection(final String name) { + return new ArangoEdgeCollectionAsyncImpl(this, name); + } + + @Override + public CompletableFuture> getEdgeDefinitions() { + return 
executorAsync().execute(this::getEdgeDefinitionsRequest, getEdgeDefinitionsDeserializer()); + } + + @Override + public CompletableFuture addEdgeDefinition(final EdgeDefinition definition) { + return executorAsync().execute(() -> addEdgeDefinitionRequest(definition), addEdgeDefinitionResponseDeserializer()); + } + + @Override + public CompletableFuture replaceEdgeDefinition(final EdgeDefinition definition) { + return replaceEdgeDefinition(definition, new ReplaceEdgeDefinitionOptions()); + } + + @Override + public CompletableFuture replaceEdgeDefinition(final EdgeDefinition definition, final ReplaceEdgeDefinitionOptions options) { + return executorAsync().execute(() -> replaceEdgeDefinitionRequest(definition, options), replaceEdgeDefinitionResponseDeserializer()); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoGraphImpl.java b/core/src/main/java/com/arangodb/internal/ArangoGraphImpl.java new file mode 100644 index 000000000..974dbb8ad --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoGraphImpl.java @@ -0,0 +1,135 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.model.GraphCreateOptions; +import com.arangodb.model.ReplaceEdgeDefinitionOptions; +import com.arangodb.model.VertexCollectionCreateOptions; + +import java.util.Collection; + +import static com.arangodb.internal.ArangoErrors.ERROR_GRAPH_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Mark Vollmary + */ +public class ArangoGraphImpl extends InternalArangoGraph implements ArangoGraph { + + private final ArangoDatabase db; + + protected ArangoGraphImpl(final ArangoDatabaseImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabase db() { + return db; + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_GRAPH_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public GraphEntity create(final Iterable edgeDefinitions) { + return db().createGraph(name(), edgeDefinitions); + } + + @Override + public GraphEntity create(final Iterable edgeDefinitions, final GraphCreateOptions options) { + return db().createGraph(name(), edgeDefinitions, options); + } + + @Override + public void drop() { + executorSync().execute(dropRequest(), Void.class); + } + + @Override + public void drop(final boolean dropCollections) { + executorSync().execute(dropRequest(dropCollections), Void.class); + } + + @Override + public GraphEntity getInfo() { + return executorSync().execute(getInfoRequest(), getInfoResponseDeserializer()); + } + + @Override + public Collection getVertexCollections() { + return executorSync().execute(getVertexCollectionsRequest(), getVertexCollectionsResponseDeserializer()); + } + + @Override + public GraphEntity addVertexCollection(final 
String name) { + return addVertexCollection(name, new VertexCollectionCreateOptions()); + } + + @Override + public GraphEntity addVertexCollection(final String name, final VertexCollectionCreateOptions options) { + return executorSync().execute(addVertexCollectionRequest(name, options), addVertexCollectionResponseDeserializer()); + } + + @Override + public ArangoVertexCollection vertexCollection(final String name) { + return new ArangoVertexCollectionImpl(this, name); + } + + @Override + public ArangoEdgeCollection edgeCollection(final String name) { + return new ArangoEdgeCollectionImpl(this, name); + } + + @Override + public Collection getEdgeDefinitions() { + return executorSync().execute(getEdgeDefinitionsRequest(), getEdgeDefinitionsDeserializer()); + } + + @Override + public GraphEntity addEdgeDefinition(final EdgeDefinition definition) { + return executorSync().execute(addEdgeDefinitionRequest(definition), addEdgeDefinitionResponseDeserializer()); + } + + @Override + public GraphEntity replaceEdgeDefinition(final EdgeDefinition definition) { + return replaceEdgeDefinition(definition, new ReplaceEdgeDefinitionOptions()); + } + + @Override + public GraphEntity replaceEdgeDefinition(final EdgeDefinition definition, final ReplaceEdgeDefinitionOptions options) { + return executorSync().execute(replaceEdgeDefinitionRequest(definition, options), replaceEdgeDefinitionResponseDeserializer()); + } + +} diff --git a/src/main/java/com/arangodb/internal/ArangoCursorExecute.java b/core/src/main/java/com/arangodb/internal/ArangoMetricsImpl.java similarity index 62% rename from src/main/java/com/arangodb/internal/ArangoCursorExecute.java rename to core/src/main/java/com/arangodb/internal/ArangoMetricsImpl.java index 78aa5e009..dd13dea2f 100644 --- a/src/main/java/com/arangodb/internal/ArangoCursorExecute.java +++ b/core/src/main/java/com/arangodb/internal/ArangoMetricsImpl.java @@ -1,37 +1,42 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - 
* Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import com.arangodb.ArangoDBException; -import com.arangodb.entity.CursorEntity; -import com.arangodb.internal.net.HostHandle; - -/** - * @author Mark Vollmary - * - */ -public interface ArangoCursorExecute { - - CursorEntity next(String id, HostHandle hostHandle) throws ArangoDBException; - - void close(String id, HostHandle hostHandle) throws ArangoDBException; - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoMetrics; +import com.arangodb.QueueTimeMetrics; + +/** + * @author Michele Rastelli + */ +public class ArangoMetricsImpl implements ArangoMetrics { + + private final QueueTimeMetrics queueTimeMetrics; + + public ArangoMetricsImpl(QueueTimeMetrics queueTimeMetrics) { + this.queueTimeMetrics = queueTimeMetrics; + } + + @Override + public QueueTimeMetrics getQueueTime() { + return queueTimeMetrics; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoRequestParam.java b/core/src/main/java/com/arangodb/internal/ArangoRequestParam.java new file mode 100644 index 000000000..16c497994 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoRequestParam.java @@ -0,0 +1,39 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +/** + * @author Mark Vollmary + */ +public final class ArangoRequestParam { + + public static final String SYSTEM = "_system"; + public static final String DATABASE = "database"; + public static final String WAIT_FOR_SYNC = "waitForSync"; + public static final String IF_NONE_MATCH = "if-none-match"; + public static final String IF_MATCH = "if-match"; + public static final String KEEP_NULL = "keepNull"; + + private ArangoRequestParam() { + super(); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoResponseField.java b/core/src/main/java/com/arangodb/internal/ArangoResponseField.java new file mode 100644 index 000000000..0388977dc --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoResponseField.java @@ -0,0 +1,35 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +/** + * @author Mark Vollmary + */ +public final class ArangoResponseField { + + public static final String ERROR_FIELD_NAME = "error"; + public static final String RESULT_JSON_POINTER = "/result"; + + private ArangoResponseField() { + super(); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoSearchAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoSearchAsyncImpl.java new file mode 100644 index 000000000..b20eceea4 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoSearchAsyncImpl.java @@ -0,0 +1,108 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions; + +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Mark Vollmary + */ +public class ArangoSearchAsyncImpl extends InternalArangoSearch implements ArangoSearchAsync { + private final ArangoDatabaseAsync db; + + protected ArangoSearchAsyncImpl(final ArangoDatabaseAsyncImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabaseAsync db() { + return db; + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(this::dropRequest, Void.class); + } + + @Override + public CompletableFuture rename(final String newName) { + return executorAsync().execute(() -> renameRequest(newName), ViewEntity.class); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, ViewEntity.class); + } + + @Override + public CompletableFuture create() { + return create(new ArangoSearchCreateOptions()); + } + + @Override + public CompletableFuture create(final ArangoSearchCreateOptions options) { + return db().createArangoSearch(name(), options); + } + + @Override + public CompletableFuture getProperties() { + return executorAsync().execute(this::getPropertiesRequest, ArangoSearchPropertiesEntity.class); + } + + @Override + public CompletableFuture updateProperties(final ArangoSearchPropertiesOptions options) { + return executorAsync().execute(() -> updatePropertiesRequest(options), ArangoSearchPropertiesEntity.class); + } + + @Override + public CompletableFuture replaceProperties(final ArangoSearchPropertiesOptions options) { + return executorAsync().execute(() -> replacePropertiesRequest(options), ArangoSearchPropertiesEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoSearchImpl.java b/core/src/main/java/com/arangodb/internal/ArangoSearchImpl.java new file mode 100644 index 000000000..8c92266d2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoSearchImpl.java @@ -0,0 +1,103 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabase; +import com.arangodb.ArangoSearch; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity; +import com.arangodb.model.arangosearch.ArangoSearchCreateOptions; +import com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Mark Vollmary + */ +public class ArangoSearchImpl extends InternalArangoSearch implements ArangoSearch { + private final ArangoDatabase db; + + protected ArangoSearchImpl(final ArangoDatabaseImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabase db() { + return db; + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public void drop() { + executorSync().execute(dropRequest(), Void.class); + } + + @Override + public ViewEntity rename(final String newName) { + return executorSync().execute(renameRequest(newName), ViewEntity.class); + } + + @Override + public ViewEntity getInfo() { + return executorSync().execute(getInfoRequest(), ViewEntity.class); + } + + @Override + public 
ViewEntity create() { + return create(new ArangoSearchCreateOptions()); + } + + @Override + public ViewEntity create(final ArangoSearchCreateOptions options) { + return db().createArangoSearch(name(), options); + } + + @Override + public ArangoSearchPropertiesEntity getProperties() { + return executorSync().execute(getPropertiesRequest(), ArangoSearchPropertiesEntity.class); + } + + @Override + public ArangoSearchPropertiesEntity updateProperties(final ArangoSearchPropertiesOptions options) { + return executorSync().execute(updatePropertiesRequest(options), ArangoSearchPropertiesEntity.class); + } + + @Override + public ArangoSearchPropertiesEntity replaceProperties(final ArangoSearchPropertiesOptions options) { + return executorSync().execute(replacePropertiesRequest(options), ArangoSearchPropertiesEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionAsyncImpl.java new file mode 100644 index 000000000..ce009d45c --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionAsyncImpl.java @@ -0,0 +1,140 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoGraphAsync; +import com.arangodb.ArangoVertexCollectionAsync; +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.*; + +/** + * @author Mark Vollmary + */ +public class ArangoVertexCollectionAsyncImpl extends InternalArangoVertexCollection implements ArangoVertexCollectionAsync { + + private final ArangoGraphAsync graph; + + protected ArangoVertexCollectionAsyncImpl(final ArangoGraphAsyncImpl graph, final String name) { + super(graph, graph.db().name(), graph.name(), name); + this.graph = graph; + } + + @Override + public ArangoGraphAsync graph() { + return graph; + } + + @Deprecated + @Override + public CompletableFuture drop() { + return drop(new VertexCollectionDropOptions()); + } + + @Deprecated + @Override + public CompletableFuture drop(final VertexCollectionDropOptions options) { + return executorAsync().execute(() -> dropRequest(options), Void.class); + } + + @Override + public CompletableFuture remove() { + return remove(new VertexCollectionRemoveOptions()); + } + + @Override + public CompletableFuture remove(final VertexCollectionRemoveOptions options) { + return executorAsync().execute(() -> removeVertexCollectionRequest(options), Void.class); + } + + @Override + public CompletableFuture insertVertex(final Object value) { + return executorAsync().execute(() -> insertVertexRequest(value, new VertexCreateOptions()), + insertVertexResponseDeserializer()); + } + + @Override + public CompletableFuture insertVertex(final Object value, final VertexCreateOptions options) { + return executorAsync().execute(() -> insertVertexRequest(value, options), 
insertVertexResponseDeserializer()); + } + + @Override + public CompletableFuture getVertex(final String key, final Class type) { + return getVertex(key, type, null); + } + + @Override + public CompletableFuture getVertex(final String key, final Class type, final GraphDocumentReadOptions options) { + return executorAsync().execute(() -> getVertexRequest(key, options), getVertexResponseDeserializer(type)) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 304) + || matches(aEx, 404, ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(aEx, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture replaceVertex(final String key, final Object value) { + return executorAsync().execute(() -> replaceVertexRequest(key, value, new VertexReplaceOptions()), + replaceVertexResponseDeserializer()); + } + + @Override + public CompletableFuture replaceVertex(final String key, final Object value, final VertexReplaceOptions options) { + return executorAsync().execute(() -> replaceVertexRequest(key, value, options), replaceVertexResponseDeserializer()); + } + + @Override + public CompletableFuture updateVertex(final String key, final Object value) { + return executorAsync().execute(() -> updateVertexRequest(key, value, new VertexUpdateOptions()), + updateVertexResponseDeserializer()); + } + + @Override + public CompletableFuture updateVertex(final String key, final Object value, final VertexUpdateOptions options) { + return executorAsync().execute(() -> updateVertexRequest(key, value, options), updateVertexResponseDeserializer()); + } + + @Override + public CompletableFuture deleteVertex(final String key) { + return executorAsync().execute(() -> deleteVertexRequest(key, new VertexDeleteOptions()), Void.class); + } + + @Override + public 
CompletableFuture deleteVertex(final String key, final VertexDeleteOptions options) { + return executorAsync().execute(() -> deleteVertexRequest(key, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionImpl.java b/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionImpl.java new file mode 100644 index 000000000..0b0d1ca3a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoVertexCollectionImpl.java @@ -0,0 +1,134 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoGraph; +import com.arangodb.ArangoVertexCollection; +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; + +import static com.arangodb.internal.ArangoErrors.*; + +/** + * @author Mark Vollmary + */ +public class ArangoVertexCollectionImpl extends InternalArangoVertexCollection implements ArangoVertexCollection { + + private final ArangoGraph graph; + + protected ArangoVertexCollectionImpl(final ArangoGraphImpl graph, final String name) { + super(graph, graph.db().name(), graph.name(), name); + this.graph = graph; + } + + @Override + public ArangoGraph graph() { + return graph; + } + + @Deprecated + @Override + public void drop() { + drop(new VertexCollectionDropOptions()); + } + + @Deprecated + @Override + public void drop(final VertexCollectionDropOptions options) { + executorSync().execute(dropRequest(options), Void.class); + } + + @Override + public void remove() { + remove(new VertexCollectionRemoveOptions()); + } + + @Override + public void remove(final VertexCollectionRemoveOptions options) { + executorSync().execute(removeVertexCollectionRequest(options), Void.class); + } + + @Override + public VertexEntity insertVertex(final Object value) { + return executorSync().execute(insertVertexRequest(value, new VertexCreateOptions()), + insertVertexResponseDeserializer()); + } + + @Override + public VertexEntity insertVertex(final Object value, final VertexCreateOptions options) { + return executorSync().execute(insertVertexRequest(value, options), insertVertexResponseDeserializer()); + } + + @Override + public T getVertex(final String key, final Class type) { + return getVertex(key, type, null); + } + + @Override + public T getVertex(final String key, final Class type, final GraphDocumentReadOptions options) { + try { + return 
executorSync().execute(getVertexRequest(key, options), getVertexResponseDeserializer(type)); + } catch (final ArangoDBException e) { + if (matches(e, 304) + || matches(e, 404, ERROR_ARANGO_DOCUMENT_NOT_FOUND) + || matches(e, 412, ERROR_ARANGO_CONFLICT) + ) { + return null; + } + throw e; + } + } + + @Override + public VertexUpdateEntity replaceVertex(final String key, final Object value) { + return executorSync().execute(replaceVertexRequest(key, value, new VertexReplaceOptions()), + replaceVertexResponseDeserializer()); + } + + @Override + public VertexUpdateEntity replaceVertex(final String key, final Object value, final VertexReplaceOptions options) { + return executorSync().execute(replaceVertexRequest(key, value, options), replaceVertexResponseDeserializer()); + } + + @Override + public VertexUpdateEntity updateVertex(final String key, final Object value) { + return executorSync().execute(updateVertexRequest(key, value, new VertexUpdateOptions()), + updateVertexResponseDeserializer()); + } + + @Override + public VertexUpdateEntity updateVertex(final String key, final Object value, final VertexUpdateOptions options) { + return executorSync().execute(updateVertexRequest(key, value, options), updateVertexResponseDeserializer()); + } + + @Override + public void deleteVertex(final String key) { + executorSync().execute(deleteVertexRequest(key, new VertexDeleteOptions()), Void.class); + } + + @Override + public void deleteVertex(final String key, final VertexDeleteOptions options) { + executorSync().execute(deleteVertexRequest(key, options), Void.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoViewAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoViewAsyncImpl.java new file mode 100644 index 000000000..6aa25dada --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoViewAsyncImpl.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.ViewEntity; + +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Mark Vollmary + */ +public class ArangoViewAsyncImpl extends InternalArangoView implements ArangoViewAsync { + private final ArangoDatabaseAsyncImpl db; + protected ArangoViewAsyncImpl(final ArangoDatabaseAsyncImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabaseAsync db() { + return db; + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(this::dropRequest, Void.class); + } + + @Override + public CompletableFuture rename(final String newName) { + return executorAsync().execute(() -> renameRequest(newName), ViewEntity.class); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, ViewEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ArangoViewImpl.java b/core/src/main/java/com/arangodb/internal/ArangoViewImpl.java new file mode 100644 index 000000000..9187ffba0 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ArangoViewImpl.java @@ -0,0 +1,75 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabase; +import com.arangodb.ArangoView; +import com.arangodb.entity.ViewEntity; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Mark Vollmary + */ +public class ArangoViewImpl extends InternalArangoView implements ArangoView { + private final ArangoDatabase db; + + protected ArangoViewImpl(final ArangoDatabaseImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabase db() { + return db; + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public void drop() { + executorSync().execute(dropRequest(), Void.class); + } + + @Override + public ViewEntity rename(final String newName) { + return executorSync().execute(renameRequest(newName), ViewEntity.class); + } + + @Override + public ViewEntity getInfo() { + return executorSync().execute(getInfoRequest(), ViewEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/DocumentFields.java b/core/src/main/java/com/arangodb/internal/DocumentFields.java new file mode 100644 index 000000000..f2b20e375 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/DocumentFields.java @@ -0,0 +1,20 @@ +package com.arangodb.internal; + +import java.util.Arrays; +import java.util.List; + +public final class DocumentFields { + + public static final String ID = "_id"; + public static final String KEY = "_key"; + public static final String REV = "_rev"; + public static final String FROM = "_from"; + public static final String TO = "_to"; + + private DocumentFields() { + } + + public static 
List values() { + return Arrays.asList(ID, KEY, REV, FROM, TO); + } +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java b/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java new file mode 100644 index 000000000..f794bcd31 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java @@ -0,0 +1,495 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.entity.*; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.internal.util.DocumentUtil; +import com.arangodb.internal.util.RequestUtils; +import com.arangodb.model.*; +import com.arangodb.util.RawData; +import com.fasterxml.jackson.databind.JsonNode; + +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Collection; + +import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public abstract class InternalArangoCollection extends ArangoExecuteable { + + protected static final String PATH_API_COLLECTION = "/_api/collection"; + private static final String COLLECTION = "collection"; + private static final String PATH_API_DOCUMENT = "/_api/document"; + private static final String PATH_API_INDEX = "/_api/index"; + private static final String PATH_API_IMPORT = "/_api/import"; + private static final String PATH_API_USER = "/_api/user"; + private static final String MERGE_OBJECTS = "mergeObjects"; + private static final String KEEP_NULL = "keepNull"; + private static final String REFILL_INDEX_CACHES = "refillIndexCaches"; + private static final String VERSION_ATTRIBUTE = "versionAttribute"; + private static final String IGNORE_REVS = "ignoreRevs"; + private static final String RETURN_NEW = "returnNew"; + private static final String RETURN_OLD = "returnOld"; + private static final String OVERWRITE = "overwrite"; + private static final String OVERWRITE_MODE = "overwriteMode"; + private static final String SILENT = "silent"; + + private static final String TRANSACTION_ID = "x-arango-trx-id"; + + protected final String dbName; + protected final String name; + + protected InternalArangoCollection(final ArangoExecuteable executeable, final String dbName, final String 
name) { + super(executeable); + this.dbName = dbName; + this.name = name; + } + + public String name() { + return name; + } + + protected InternalRequest insertDocumentRequest(final T value, final DocumentCreateOptions options) { + final InternalRequest request = createInsertDocumentRequest(options); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected InternalRequest insertDocumentsRequest(final RawData values, final DocumentCreateOptions options) { + InternalRequest request = createInsertDocumentRequest(options); + request.setBody(getSerde().serialize(values)); + return request; + } + + protected InternalRequest insertDocumentsRequest(final Iterable values, final DocumentCreateOptions options) { + InternalRequest request = createInsertDocumentRequest(options); + request.setBody(getSerde().serializeCollectionUserData(values)); + return request; + } + + private InternalRequest createInsertDocumentRequest(final DocumentCreateOptions options) { + final DocumentCreateOptions params = (options != null ? options : new DocumentCreateOptions()); + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_DOCUMENT, name); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putQueryParam(RETURN_NEW, params.getReturnNew()); + request.putQueryParam(RETURN_OLD, params.getReturnOld()); + request.putQueryParam(SILENT, params.getSilent()); + request.putQueryParam(OVERWRITE_MODE, params.getOverwriteMode() != null ? 
+ params.getOverwriteMode().getValue() : null); + request.putQueryParam(MERGE_OBJECTS, params.getMergeObjects()); + request.putQueryParam(KEEP_NULL, params.getKeepNull()); + request.putQueryParam(REFILL_INDEX_CACHES, params.getRefillIndexCaches()); + request.putQueryParam(VERSION_ATTRIBUTE, params.getVersionAttribute()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + return request; + } + + protected ResponseDeserializer>> insertDocumentsResponseDeserializer(Class userDataClass) { + return (response) -> { + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentCreateEntity.class, userDataClass)); + return getSerde().deserialize(response.getBody(), type); + }; + } + + protected InternalRequest importDocumentsRequest(final RawData values, final DocumentImportOptions options) { + return importDocumentsRequest(options).putQueryParam("type", ImportType.auto).setBody(getSerde().serialize(values)); + } + + protected InternalRequest importDocumentsRequest(final Iterable values, final DocumentImportOptions options) { + return importDocumentsRequest(options).putQueryParam("type", ImportType.list) + .setBody(getSerde().serializeCollectionUserData(values)); + } + + protected InternalRequest importDocumentsRequest(final DocumentImportOptions options) { + final DocumentImportOptions params = options != null ? 
options : new DocumentImportOptions(); + return request(dbName, RequestType.POST, PATH_API_IMPORT).putQueryParam(COLLECTION, name) + .putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()) + .putQueryParam("fromPrefix", params.getFromPrefix()).putQueryParam("toPrefix", params.getToPrefix()) + .putQueryParam(OVERWRITE, params.getOverwrite()).putQueryParam("onDuplicate", params.getOnDuplicate()) + .putQueryParam("complete", params.getComplete()).putQueryParam("details", params.getDetails()); + } + + protected InternalRequest getDocumentRequest(final String key, final DocumentReadOptions options) { + final InternalRequest request = request(dbName, RequestType.GET, PATH_API_DOCUMENT, + DocumentUtil.createDocumentHandle(name, key)); + final DocumentReadOptions params = (options != null ? options : new DocumentReadOptions()); + request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + if (Boolean.TRUE.equals(params.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + return request; + } + + protected ResponseDeserializer getDocumentResponseDeserializer(final Class type) { + return (response) -> getSerde().deserializeUserData(response.getBody(), type); + } + + protected InternalRequest getDocumentsRequest(final Iterable keys, final DocumentReadOptions options) { + final DocumentReadOptions params = (options != null ? 
options : new DocumentReadOptions()); + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_DOCUMENT, name) + .putQueryParam("onlyget", true) + .putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()) + .putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()).setBody(getSerde().serialize(keys)) + .putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + if (Boolean.TRUE.equals(params.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + return request; + } + + protected ResponseDeserializer> getDocumentsResponseDeserializer(final Class type) { + return (response) -> { + MultiDocumentEntity multiDocument = getSerde().deserialize(response.getBody(), + constructParametricType(MultiDocumentEntity.class, type)); + boolean potentialDirtyRead = Boolean.parseBoolean(response.getMeta("X-Arango-Potential-Dirty-Read")); + multiDocument.setPotentialDirtyRead(potentialDirtyRead); + return multiDocument; + }; + } + + protected InternalRequest replaceDocumentRequest( + final String key, final T value, final DocumentReplaceOptions options) { + final InternalRequest request = createReplaceDocumentRequest(options, DocumentUtil.createDocumentHandle(name, key)); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected InternalRequest replaceDocumentsRequest(final Iterable values, final DocumentReplaceOptions options) { + final InternalRequest request = createReplaceDocumentRequest(options, name); + request.setBody(getSerde().serializeCollectionUserData(values)); + return request; + } + + protected InternalRequest replaceDocumentsRequest(final RawData values, final DocumentReplaceOptions options) { + final InternalRequest request = createReplaceDocumentRequest(options, name); + request.setBody(getSerde().serialize(values)); + return request; + } + + private InternalRequest createReplaceDocumentRequest(final DocumentReplaceOptions options, String path) { + final 
DocumentReplaceOptions params = (options != null ? options : new DocumentReplaceOptions()); + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_DOCUMENT, path); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putQueryParam(IGNORE_REVS, params.getIgnoreRevs()); + request.putQueryParam(RETURN_NEW, params.getReturnNew()); + request.putQueryParam(RETURN_OLD, params.getReturnOld()); + request.putQueryParam(SILENT, params.getSilent()); + request.putQueryParam(REFILL_INDEX_CACHES, params.getRefillIndexCaches()); + request.putQueryParam(VERSION_ATTRIBUTE, params.getVersionAttribute()); + return request; + } + + protected ResponseDeserializer>> replaceDocumentsResponseDeserializer( + final Class returnType) { + return (response) -> { + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentUpdateEntity.class, returnType)); + return getSerde().deserialize(response.getBody(), type); + }; + } + + protected InternalRequest updateDocumentRequest(final String key, final T value, final DocumentUpdateOptions options) { + final InternalRequest request = createUpdateDocumentRequest(options, DocumentUtil.createDocumentHandle(name, key)); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected InternalRequest updateDocumentsRequest(final Iterable values, final DocumentUpdateOptions options) { + final InternalRequest request = createUpdateDocumentRequest(options, name); + request.setBody(getSerde().serializeCollectionUserData(values)); + return request; + } + + protected InternalRequest updateDocumentsRequest(final RawData values, final DocumentUpdateOptions options) { + final InternalRequest request = createUpdateDocumentRequest(options, name); + request.setBody(getSerde().serialize(values)); 
+ return request; + } + + private InternalRequest createUpdateDocumentRequest(final DocumentUpdateOptions options, String path) { + final DocumentUpdateOptions params = (options != null ? options : new DocumentUpdateOptions()); + final InternalRequest request = request(dbName, RequestType.PATCH, PATH_API_DOCUMENT, path); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.KEEP_NULL, params.getKeepNull()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putQueryParam(MERGE_OBJECTS, params.getMergeObjects()); + request.putQueryParam(IGNORE_REVS, params.getIgnoreRevs()); + request.putQueryParam(RETURN_NEW, params.getReturnNew()); + request.putQueryParam(RETURN_OLD, params.getReturnOld()); + request.putQueryParam(SILENT, params.getSilent()); + request.putQueryParam(REFILL_INDEX_CACHES, params.getRefillIndexCaches()); + request.putQueryParam(VERSION_ATTRIBUTE, params.getVersionAttribute()); + return request; + } + + protected ResponseDeserializer>> updateDocumentsResponseDeserializer( + final Class returnType) { + return (response) -> { + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentUpdateEntity.class, returnType)); + return getSerde().deserialize(response.getBody(), type); + }; + } + + protected InternalRequest deleteDocumentRequest(final String key, final DocumentDeleteOptions options) { + return createDeleteDocumentRequest(options, DocumentUtil.createDocumentHandle(name, key)); + } + + protected InternalRequest deleteDocumentsRequest(final Iterable docs, final DocumentDeleteOptions options) { + final InternalRequest request = createDeleteDocumentRequest(options, name); + request.setBody(getSerde().serializeCollectionUserData(docs)); + return request; + } + + protected InternalRequest deleteDocumentsRequest(final RawData docs, final 
DocumentDeleteOptions options) { + final InternalRequest request = createDeleteDocumentRequest(options, name); + request.setBody(getSerde().serialize(docs)); + return request; + } + + private InternalRequest createDeleteDocumentRequest(final DocumentDeleteOptions options, String path) { + final DocumentDeleteOptions params = (options != null ? options : new DocumentDeleteOptions()); + final InternalRequest request = request(dbName, RequestType.DELETE, PATH_API_DOCUMENT, path); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putQueryParam(RETURN_OLD, params.getReturnOld()); + request.putQueryParam(SILENT, params.getSilent()); + request.putQueryParam(REFILL_INDEX_CACHES, params.getRefillIndexCaches()); + request.putQueryParam(IGNORE_REVS, params.getIgnoreRevs()); + return request; + } + + protected ResponseDeserializer>> deleteDocumentsResponseDeserializer( + final Class userDataClass) { + return (response) -> { + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentDeleteEntity.class, userDataClass)); + return getSerde().deserialize(response.getBody(), type); + }; + } + + protected InternalRequest documentExistsRequest(final String key, final DocumentExistsOptions options) { + final InternalRequest request = request(dbName, RequestType.HEAD, PATH_API_DOCUMENT, + DocumentUtil.createDocumentHandle(name, key)); + final DocumentExistsOptions params = (options != null ? 
options : new DocumentExistsOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); + return request; + } + + protected InternalRequest getIndexRequest(final String id) { + return request(dbName, RequestType.GET, PATH_API_INDEX, createIndexId(id)); + } + + protected InternalRequest deleteIndexRequest(final String id) { + return request(dbName, RequestType.DELETE, PATH_API_INDEX, createIndexId(id)); + } + + protected ResponseDeserializer deleteIndexResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/id", String.class); + } + + private String createIndexId(final String id) { + final String index; + if (id.matches(DocumentUtil.REGEX_ID)) { + index = id; + } else if (id.matches(DocumentUtil.REGEX_KEY)) { + index = name + "/" + id; + } else { + throw new ArangoDBException(String.format("index id %s is not valid.", id)); + } + return index; + } + + protected InternalRequest createPersistentIndexRequest( + final Iterable fields, final PersistentIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody(getSerde().serialize( + OptionsBuilder.build(options != null ? 
options : new PersistentIndexOptions(), fields))); + return request; + } + + protected InternalRequest createInvertedIndexRequest(final InvertedIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody(getSerde().serialize(options)); + return request; + } + + protected InternalRequest createGeoIndexRequest(final Iterable fields, final GeoIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody( + getSerde().serialize(OptionsBuilder.build(options != null ? options : new GeoIndexOptions(), fields))); + return request; + } + + @Deprecated + protected InternalRequest createFulltextIndexRequest(final Iterable fields, final FulltextIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody( + getSerde().serialize(OptionsBuilder.build(options != null ? options : new FulltextIndexOptions(), + fields))); + return request; + } + + protected InternalRequest createTtlIndexRequest(final Iterable fields, final TtlIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody( + getSerde().serialize(OptionsBuilder.build(options != null ? options : new TtlIndexOptions(), fields))); + return request; + } + + protected InternalRequest createZKDIndexRequest( + final Iterable fields, final ZKDIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + request.setBody(getSerde().serialize(OptionsBuilder.build(options != null ? 
options : + new ZKDIndexOptions().fieldValueTypes(ZKDIndexOptions.FieldValueTypes.DOUBLE), fields))); + return request; + } + + protected InternalRequest createMDIndexRequest( + final Iterable fields, final AbstractMDIndexOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + AbstractMDIndexOptions opts = options != null ? options : new MDIndexOptions().fieldValueTypes(MDIFieldValueTypes.DOUBLE); + request.setBody(getSerde().serialize(OptionsBuilder.build(opts, fields))); + return request; + } + + protected InternalRequest getIndexesRequest() { + final InternalRequest request = request(dbName, RequestType.GET, PATH_API_INDEX); + request.putQueryParam(COLLECTION, name); + return request; + } + + protected ResponseDeserializer> getIndexesResponseDeserializer() { + return (response) -> { + Collection indexes = new ArrayList<>(); + for (JsonNode idx : getSerde().parse(response.getBody(), "/indexes")) { + if (!"inverted".equals(idx.get("type").textValue())) { + indexes.add(getSerde().deserialize(idx, IndexEntity.class)); + } + } + return indexes; + }; + } + + protected ResponseDeserializer> getInvertedIndexesResponseDeserializer() { + return (response) -> { + Collection indexes = new ArrayList<>(); + for (JsonNode idx : getSerde().parse(response.getBody(), "/indexes")) { + if ("inverted".equals(idx.get("type").textValue())) { + indexes.add(getSerde().deserialize(idx, InvertedIndexEntity.class)); + } + } + return indexes; + }; + } + + protected InternalRequest truncateRequest(final CollectionTruncateOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_COLLECTION, name, "truncate"); + final CollectionTruncateOptions params = (options != null ? 
options : new CollectionTruncateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + return request; + } + + protected InternalRequest countRequest(final CollectionCountOptions options) { + final InternalRequest request = request(dbName, RequestType.GET, PATH_API_COLLECTION, name, "count"); + final CollectionCountOptions params = (options != null ? options : new CollectionCountOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + return request; + } + + protected InternalRequest dropRequest(final Boolean isSystem) { + return request(dbName, RequestType.DELETE, PATH_API_COLLECTION, name).putQueryParam("isSystem", isSystem); + } + + protected InternalRequest getInfoRequest() { + return request(dbName, RequestType.GET, PATH_API_COLLECTION, name); + } + + protected InternalRequest getPropertiesRequest() { + return request(dbName, RequestType.GET, PATH_API_COLLECTION, name, "properties"); + } + + protected InternalRequest changePropertiesRequest(final CollectionPropertiesOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_COLLECTION, name, "properties"); + request.setBody(getSerde().serialize(options != null ? 
options : new CollectionPropertiesOptions())); + return request; + } + + protected InternalRequest renameRequest(final String newName) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_COLLECTION, name, "rename"); + request.setBody(getSerde().serialize(OptionsBuilder.build(new CollectionRenameOptions(), newName))); + return request; + } + + protected InternalRequest responsibleShardRequest(final T value) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_COLLECTION, name, "responsibleShard"); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected InternalRequest getRevisionRequest() { + return request(dbName, RequestType.GET, PATH_API_COLLECTION, name, "revision"); + } + + protected InternalRequest grantAccessRequest(final String user, final Permissions permissions) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, + dbName, name).setBody(getSerde().serialize(OptionsBuilder.build(new UserAccessOptions(), + permissions))); + } + + protected InternalRequest resetAccessRequest(final String user) { + return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_USER, user, ArangoRequestParam.DATABASE, + dbName, name); + } + + protected InternalRequest getPermissionsRequest(final String user) { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_USER, user, ArangoRequestParam.DATABASE, + dbName, name); + } + + protected ResponseDeserializer getPermissionsResponseDeserialzer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + Permissions.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoCursor.java b/core/src/main/java/com/arangodb/internal/InternalArangoCursor.java new file mode 100644 index 000000000..2b6a63574 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoCursor.java 
@@ -0,0 +1,111 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.BaseArangoCursor; +import com.arangodb.entity.CursorEntity; + +import java.util.List; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public abstract class InternalArangoCursor extends ArangoExecuteable implements BaseArangoCursor { + + private static final String PATH_API_CURSOR = "/_api/cursor"; + + private final String dbName; + private final CursorEntity entity; + private final Class type; + private final boolean allowRetry; + + protected InternalArangoCursor( + final ArangoExecuteable executeable, + final String dbName, + final CursorEntity entity, + final Class type, + final Boolean allowRetry + ) { + super(executeable); + this.dbName = dbName; + this.entity = entity; + this.type = type; + this.allowRetry = Boolean.TRUE.equals(allowRetry); + } + + @Override + public String getId() { + return entity.getId(); + } + + @Override + public Integer getCount() { + return entity.getCount(); + } + + @Override + public Boolean isCached() { + return entity.getCached(); + } + + @Override + public Boolean hasMore() { + return entity.getHasMore(); + } + + @Override + public List getResult() { + return entity.getResult(); + } + + @Override + public Boolean isPotentialDirtyRead() { + return 
entity.isPotentialDirtyRead(); + } + + @Override + public String getNextBatchId() { + return entity.getNextBatchId(); + } + + @Override + public CursorEntity.Extras getExtra() { + return entity.getExtra(); + } + + protected boolean allowRetry() { + return allowRetry; + } + + protected Class getType() { + return type; + } + + protected InternalRequest queryNextRequest() { + return request(dbName, RequestType.POST, PATH_API_CURSOR, entity.getId(), entity.getNextBatchId()); + } + + protected InternalRequest queryCloseRequest() { + return request(dbName, RequestType.DELETE, PATH_API_CURSOR, entity.getId()); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoDB.java b/core/src/main/java/com/arangodb/internal/InternalArangoDB.java new file mode 100644 index 000000000..264c8fdc4 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoDB.java @@ -0,0 +1,215 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.Request; +import com.arangodb.Response; +import com.arangodb.entity.LogLevelEntity; +import com.arangodb.entity.Permissions; +import com.arangodb.entity.ServerRole; +import com.arangodb.entity.UserEntity; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.model.*; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; + +import static com.arangodb.internal.serde.SerdeUtils.constructListType; + +/** + * @author Mark Vollmary + * @author Heiko Kernbach + */ +public abstract class InternalArangoDB extends ArangoExecuteable { + private static final String PATH_API_ADMIN_LOG_ENTRIES = "/_admin/log/entries"; + private static final String PATH_API_ADMIN_LOG_LEVEL = "/_admin/log/level"; + private static final String PATH_API_ROLE = "/_admin/server/role"; + private static final String PATH_API_SERVER_ID = "/_admin/server/id"; + private static final String PATH_API_USER = "/_api/user"; + private static final String PATH_API_QUERY_RULES = "/_api/query/rules"; + + protected InternalArangoDB(final CommunicationProtocol protocol, final ArangoConfig config) { + super(protocol, config); + } + + protected InternalArangoDB(final ArangoExecuteable other) { + super(other); + } + + protected InternalRequest getRoleRequest() { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_ROLE); + } + + protected InternalRequest getServerIdRequest() { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_SERVER_ID); + } + + protected ResponseDeserializer getRoleResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/role", ServerRole.class); + } + + protected ResponseDeserializer getServerIdResponseDeserializer() { + return 
(response) -> getSerde().deserialize(response.getBody(), "/id", String.class); + } + + protected InternalRequest createDatabaseRequest(final DBCreateOptions options) { + final InternalRequest request = request(ArangoRequestParam.SYSTEM, RequestType.POST, + InternalArangoDatabase.PATH_API_DATABASE); + request.setBody(getSerde().serialize(options)); + return request; + } + + protected ResponseDeserializer createDatabaseResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + Boolean.class); + } + + protected InternalRequest getDatabasesRequest(final String dbName) { + return request(dbName, RequestType.GET, InternalArangoDatabase.PATH_API_DATABASE); + } + + protected ResponseDeserializer> getDatabaseResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(String.class)); + } + + protected InternalRequest getAccessibleDatabasesForRequest(final String dbName, final String user) { + return request(dbName, RequestType.GET, PATH_API_USER, user, ArangoRequestParam.DATABASE); + } + + protected ResponseDeserializer> getAccessibleDatabasesForResponseDeserializer() { + return (response) -> { + Iterator names = + getSerde().parse(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER).fieldNames(); + final Collection dbs = new ArrayList<>(); + while (names.hasNext()) { + dbs.add(names.next()); + } + return dbs; + }; + } + + protected InternalRequest createUserRequest( + final String dbName, + final String user, + final String passwd, + final UserCreateOptions options) { + final InternalRequest request; + request = request(dbName, RequestType.POST, PATH_API_USER); + request.setBody( + getSerde().serialize(OptionsBuilder.build(options != null ? 
options : new UserCreateOptions(), user, + passwd))); + return request; + } + + protected InternalRequest deleteUserRequest(final String dbName, final String user) { + return request(dbName, RequestType.DELETE, PATH_API_USER, user); + } + + protected InternalRequest getUsersRequest(final String dbName) { + return request(dbName, RequestType.GET, PATH_API_USER); + } + + protected InternalRequest getUserRequest(final String dbName, final String user) { + return request(dbName, RequestType.GET, PATH_API_USER, user); + } + + protected ResponseDeserializer> getUsersResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(UserEntity.class)); + } + + protected InternalRequest updateUserRequest(final String dbName, final String user, final UserUpdateOptions options) { + final InternalRequest request; + request = request(dbName, RequestType.PATCH, PATH_API_USER, user); + request.setBody(getSerde().serialize(options != null ? options : new UserUpdateOptions())); + return request; + } + + protected InternalRequest replaceUserRequest(final String dbName, final String user, final UserUpdateOptions options) { + final InternalRequest request; + request = request(dbName, RequestType.PUT, PATH_API_USER, user); + request.setBody(getSerde().serialize(options != null ? 
options : new UserUpdateOptions())); + return request; + } + + protected InternalRequest updateUserDefaultDatabaseAccessRequest(final String user, final Permissions permissions) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, + "*").setBody(getSerde().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + } + + protected InternalRequest updateUserDefaultCollectionAccessRequest(final String user, final Permissions permissions) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, + "*", "*").setBody(getSerde().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + } + + protected InternalRequest executeRequest(final Request request) { + InternalRequest ireq = new InternalRequest(request.getDb(), RequestType.from(request.getMethod()), request.getPath()); + ireq.putHeaderParams(request.getHeaders()); + ireq.putQueryParams(request.getQueryParams()); + ireq.setBody(getSerde().serializeUserData(request.getBody())); + return ireq; + } + + protected ResponseDeserializer> responseDeserializer(Class type) { + return (response) -> new Response<>( + response.getResponseCode(), + response.getMeta(), + getSerde().deserializeUserData(response.getBody(), type) + ); + } + + protected InternalRequest getLogEntriesRequest(final LogOptions options) { + final LogOptions params = options != null ? 
options : new LogOptions(); + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_ADMIN_LOG_ENTRIES) + .putQueryParam(LogOptions.PROPERTY_UPTO, params.getUpto()) + .putQueryParam(LogOptions.PROPERTY_LEVEL, params.getLevel()) + .putQueryParam(LogOptions.PROPERTY_START, params.getStart()) + .putQueryParam(LogOptions.PROPERTY_SIZE, params.getSize()) + .putQueryParam(LogOptions.PROPERTY_OFFSET, params.getOffset()) + .putQueryParam(LogOptions.PROPERTY_SEARCH, params.getSearch()) + .putQueryParam(LogOptions.PROPERTY_SORT, params.getSort()); + } + + protected InternalRequest getLogLevelRequest(final LogLevelOptions options) { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_ADMIN_LOG_LEVEL) + .putQueryParam("serverId", options.getServerId()); + } + + protected InternalRequest setLogLevelRequest(final LogLevelEntity entity, final LogLevelOptions options) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_ADMIN_LOG_LEVEL) + .putQueryParam("serverId", options.getServerId()) + .setBody(getSerde().serialize(entity)); + } + + protected InternalRequest resetLogLevelsRequest(final LogLevelOptions options) { + return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_ADMIN_LOG_LEVEL) + .putQueryParam("serverId", options.getServerId()); + } + + protected InternalRequest getQueryOptimizerRulesRequest() { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_QUERY_RULES); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java new file mode 100644 index 000000000..135f4d825 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java @@ -0,0 +1,391 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.internal.util.RequestUtils; +import com.arangodb.model.*; +import com.arangodb.model.arangosearch.*; + +import java.util.Collection; +import java.util.Map; + +import static com.arangodb.internal.serde.SerdeUtils.constructListType; +import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public abstract class InternalArangoDatabase extends ArangoExecuteable { + + protected static final String PATH_API_DATABASE = "/_api/database"; + private static final String PATH_API_VERSION = "/_api/version"; + private static final String PATH_API_ENGINE = "/_api/engine"; + private static final String PATH_API_CURSOR = "/_api/cursor"; + private static final String PATH_API_TRANSACTION = "/_api/transaction"; + private static final String PATH_API_BEGIN_STREAM_TRANSACTION = "/_api/transaction/begin"; + private static final String PATH_API_AQLFUNCTION = "/_api/aqlfunction"; + private static final String PATH_API_EXPLAIN = "/_api/explain"; + private static final String PATH_API_QUERY = "/_api/query"; + private static final String PATH_API_QUERY_CACHE = "/_api/query-cache"; + private static final String PATH_API_QUERY_CACHE_PROPERTIES = "/_api/query-cache/properties"; + private static final String 
PATH_API_QUERY_PROPERTIES = "/_api/query/properties"; + private static final String PATH_API_QUERY_CURRENT = "/_api/query/current"; + private static final String PATH_API_QUERY_SLOW = "/_api/query/slow"; + private static final String PATH_API_ADMIN_ROUTING_RELOAD = "/_admin/routing/reload"; + private static final String PATH_API_USER = "/_api/user"; + + private static final String TRANSACTION_ID = "x-arango-trx-id"; + + private final String name; + + protected InternalArangoDatabase(final ArangoExecuteable executeable, final String name) { + super(executeable); + this.name = name; + } + + public String name() { + return name; + } + + protected ResponseDeserializer> getDatabaseResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(String.class)); + } + + protected InternalRequest getAccessibleDatabasesRequest() { + return request(name, RequestType.GET, PATH_API_DATABASE, "user"); + } + + protected InternalRequest getVersionRequest() { + return request(name, RequestType.GET, PATH_API_VERSION); + } + + protected InternalRequest getEngineRequest() { + return request(name, RequestType.GET, PATH_API_ENGINE); + } + + protected InternalRequest createCollectionRequest(final String name, final CollectionCreateOptions options) { + + byte[] body = getSerde().serialize(OptionsBuilder.build(options != null ? options : + new CollectionCreateOptions(), name)); + + return request(this.name, RequestType.POST, InternalArangoCollection.PATH_API_COLLECTION).setBody(body); + } + + protected InternalRequest getCollectionsRequest(final CollectionsReadOptions options) { + final InternalRequest request; + request = request(name, RequestType.GET, InternalArangoCollection.PATH_API_COLLECTION); + final CollectionsReadOptions params = (options != null ? 
options : new CollectionsReadOptions()); + request.putQueryParam("excludeSystem", params.getExcludeSystem()); + return request; + } + + protected ResponseDeserializer> getCollectionsResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(CollectionEntity.class)); + } + + protected InternalRequest dropRequest() { + return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_DATABASE, name); + } + + protected ResponseDeserializer createDropResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + Boolean.class); + } + + protected InternalRequest grantAccessRequest(final String user, final Permissions permissions) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, + name).setBody(getSerde().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + } + + protected InternalRequest resetAccessRequest(final String user) { + return request(ArangoRequestParam.SYSTEM, RequestType.DELETE, PATH_API_USER, user, ArangoRequestParam.DATABASE, + name); + } + + protected InternalRequest updateUserDefaultCollectionAccessRequest(final String user, final Permissions permissions) { + return request(ArangoRequestParam.SYSTEM, RequestType.PUT, PATH_API_USER, user, ArangoRequestParam.DATABASE, name + , "*").setBody(getSerde().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); + } + + protected InternalRequest getPermissionsRequest(final String user) { + return request(ArangoRequestParam.SYSTEM, RequestType.GET, PATH_API_USER, user, ArangoRequestParam.DATABASE, name); + } + + protected ResponseDeserializer getPermissionsResponseDeserialzer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + Permissions.class); + } + + protected InternalRequest 
queryRequest(final String query, final Map bindVars, + final AqlQueryOptions options) { + final AqlQueryOptions opt = options != null ? options : new AqlQueryOptions(); + final InternalRequest request = request(name, RequestType.POST, PATH_API_CURSOR) + .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars))); + if (Boolean.TRUE.equals(opt.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); + return request; + } + + protected InternalRequest queryNextRequest(String id, AqlQueryOptions options, String nextBatchId) { + final InternalRequest request = request(name, RequestType.POST, PATH_API_CURSOR, id, nextBatchId); + final AqlQueryOptions opt = options != null ? options : new AqlQueryOptions(); + if (Boolean.TRUE.equals(opt.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); + return request; + } + + protected InternalRequest queryCloseRequest(final String id, final AqlQueryOptions options) { + final InternalRequest request = request(name, RequestType.DELETE, PATH_API_CURSOR, id); + final AqlQueryOptions opt = options != null ? options : new AqlQueryOptions(); + if (Boolean.TRUE.equals(opt.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + request.putHeaderParam(TRANSACTION_ID, opt.getStreamTransactionId()); + return request; + } + + protected InternalRequest explainQueryRequest(final String query, final Map bindVars, + final AqlQueryExplainOptions options) { + final AqlQueryExplainOptions opt = options != null ? 
options : new AqlQueryExplainOptions(); + return request(name, RequestType.POST, PATH_API_EXPLAIN) + .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars))); + } + + protected InternalRequest explainQueryRequest(final String query, final Map bindVars, + final ExplainAqlQueryOptions options) { + final ExplainAqlQueryOptions opt = options != null ? options : new ExplainAqlQueryOptions(); + return request(name, RequestType.POST, PATH_API_EXPLAIN) + .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars))); + } + + protected InternalRequest parseQueryRequest(final String query) { + return request(name, RequestType.POST, PATH_API_QUERY).setBody(getSerde().serialize(OptionsBuilder.build(new AqlQueryParseOptions(), query))); + } + + protected InternalRequest clearQueryCacheRequest() { + return request(name, RequestType.DELETE, PATH_API_QUERY_CACHE); + } + + protected InternalRequest getQueryCachePropertiesRequest() { + return request(name, RequestType.GET, PATH_API_QUERY_CACHE_PROPERTIES); + } + + protected InternalRequest setQueryCachePropertiesRequest(final QueryCachePropertiesEntity properties) { + return request(name, RequestType.PUT, PATH_API_QUERY_CACHE_PROPERTIES).setBody(getSerde().serialize(properties)); + } + + protected InternalRequest getQueryTrackingPropertiesRequest() { + return request(name, RequestType.GET, PATH_API_QUERY_PROPERTIES); + } + + protected InternalRequest setQueryTrackingPropertiesRequest(final QueryTrackingPropertiesEntity properties) { + return request(name, RequestType.PUT, PATH_API_QUERY_PROPERTIES).setBody(getSerde().serialize(properties)); + } + + protected InternalRequest getCurrentlyRunningQueriesRequest() { + return request(name, RequestType.GET, PATH_API_QUERY_CURRENT); + } + + protected InternalRequest getSlowQueriesRequest() { + return request(name, RequestType.GET, PATH_API_QUERY_SLOW); + } + + protected InternalRequest clearSlowQueriesRequest() { + return request(name, RequestType.DELETE, 
PATH_API_QUERY_SLOW); + } + + protected InternalRequest killQueryRequest(final String id) { + return request(name, RequestType.DELETE, PATH_API_QUERY, id); + } + + protected InternalRequest createAqlFunctionRequest(final String name, final String code, + final AqlFunctionCreateOptions options) { + return request(this.name, RequestType.POST, PATH_API_AQLFUNCTION).setBody(getSerde().serialize(OptionsBuilder.build(options != null ? options : new AqlFunctionCreateOptions(), name, code))); + } + + protected InternalRequest deleteAqlFunctionRequest(final String name, final AqlFunctionDeleteOptions options) { + final InternalRequest request = request(this.name, RequestType.DELETE, PATH_API_AQLFUNCTION, name); + final AqlFunctionDeleteOptions params = options != null ? options : new AqlFunctionDeleteOptions(); + request.putQueryParam("group", params.getGroup()); + return request; + } + + public ResponseDeserializer> cursorEntityDeserializer(final Class type) { + return (response) -> { + CursorEntity e = getSerde().deserialize(response.getBody(), constructParametricType(CursorEntity.class, type)); + boolean potentialDirtyRead = Boolean.parseBoolean(response.getMeta("X-Arango-Potential-Dirty-Read")); + e.setPotentialDirtyRead(potentialDirtyRead); + return e; + }; + } + + protected ResponseDeserializer deleteAqlFunctionResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/deletedCount", Integer.class); + } + + protected InternalRequest getAqlFunctionsRequest(final AqlFunctionGetOptions options) { + final InternalRequest request = request(name, RequestType.GET, PATH_API_AQLFUNCTION); + final AqlFunctionGetOptions params = options != null ? 
options : new AqlFunctionGetOptions(); + request.putQueryParam("namespace", params.getNamespace()); + return request; + } + + protected ResponseDeserializer> getAqlFunctionsResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(AqlFunctionEntity.class)); + } + + protected InternalRequest createGraphRequest(final String name, final Iterable edgeDefinitions, + final GraphCreateOptions options) { + GraphCreateOptions opts = options != null ? options : new GraphCreateOptions(); + return request(this.name, RequestType.POST, InternalArangoGraph.PATH_API_GHARIAL) + .putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, opts.getWaitForSync()) + .setBody(getSerde().serialize(OptionsBuilder.build(opts, name, edgeDefinitions))); + } + + protected ResponseDeserializer createGraphResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/graph", GraphEntity.class); + } + + protected InternalRequest getGraphsRequest() { + return request(name, RequestType.GET, InternalArangoGraph.PATH_API_GHARIAL); + } + + protected ResponseDeserializer> getGraphsResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/graphs", + constructListType(GraphEntity.class)); + } + + protected InternalRequest transactionRequest(final String action, final TransactionOptions options) { + return request(name, RequestType.POST, PATH_API_TRANSACTION).setBody(getSerde().serialize(OptionsBuilder.build(options != null ? 
options : new TransactionOptions(), action))); + } + + protected ResponseDeserializer transactionResponseDeserializer(final Class type) { + return (response) -> { + byte[] userContent = getSerde().extract(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER); + return getSerde().deserializeUserData(userContent, type); + }; + } + + protected InternalRequest beginStreamTransactionRequest(final StreamTransactionOptions options) { + StreamTransactionOptions opts = options != null ? options : new StreamTransactionOptions(); + InternalRequest r = request(name, RequestType.POST, PATH_API_BEGIN_STREAM_TRANSACTION).setBody(getSerde().serialize(opts)); + if(Boolean.TRUE.equals(opts.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(r); + } + return r; + } + + protected InternalRequest abortStreamTransactionRequest(String id) { + return request(name, RequestType.DELETE, PATH_API_TRANSACTION, id); + } + + protected InternalRequest getStreamTransactionsRequest() { + return request(name, RequestType.GET, PATH_API_TRANSACTION); + } + + protected InternalRequest getStreamTransactionRequest(String id) { + return request(name, RequestType.GET, PATH_API_TRANSACTION, id); + } + + protected ResponseDeserializer> transactionsResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/transactions", + constructListType(TransactionEntity.class)); + } + + protected InternalRequest commitStreamTransactionRequest(String id) { + return request(name, RequestType.PUT, PATH_API_TRANSACTION, id); + } + + protected ResponseDeserializer streamTransactionResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + StreamTransactionEntity.class); + } + + protected InternalRequest getInfoRequest() { + return request(name, RequestType.GET, PATH_API_DATABASE, "current"); + } + + protected ResponseDeserializer getInfoResponseDeserializer() { + return (response) -> 
getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + DatabaseEntity.class); + } + + protected InternalRequest reloadRoutingRequest() { + return request(name, RequestType.POST, PATH_API_ADMIN_ROUTING_RELOAD); + } + + protected InternalRequest getViewsRequest() { + return request(name, RequestType.GET, InternalArangoView.PATH_API_VIEW); + } + + protected ResponseDeserializer> getViewsResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(ViewEntity.class)); + } + + protected InternalRequest createViewRequest(final String name, final ViewType type) { + return request(this.name, RequestType.POST, InternalArangoView.PATH_API_VIEW).setBody(getSerde().serialize(OptionsBuilder.build(new ViewCreateOptions(), name, type))); + } + + protected InternalRequest createArangoSearchRequest(final String name, final ArangoSearchCreateOptions options) { + return request(this.name, RequestType.POST, InternalArangoView.PATH_API_VIEW).setBody(getSerde().serialize(ArangoSearchOptionsBuilder.build(options != null ? options : new ArangoSearchCreateOptions(), name))); + } + + protected InternalRequest createSearchAliasRequest(final String name, final SearchAliasCreateOptions options) { + return request(this.name, RequestType.POST, InternalArangoView.PATH_API_VIEW).setBody(getSerde().serialize( + SearchAliasOptionsBuilder.build(options != null ? 
options : new SearchAliasCreateOptions(), name))); + } + + protected InternalRequest getAnalyzerRequest(final String name) { + return request(this.name, RequestType.GET, InternalArangoView.PATH_API_ANALYZER, name); + } + + protected InternalRequest getAnalyzersRequest() { + return request(name, RequestType.GET, InternalArangoView.PATH_API_ANALYZER); + } + + protected ResponseDeserializer> getSearchAnalyzersResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), ArangoResponseField.RESULT_JSON_POINTER, + constructListType(SearchAnalyzer.class)); + } + + protected InternalRequest createAnalyzerRequest(final SearchAnalyzer options) { + return request(name, RequestType.POST, InternalArangoView.PATH_API_ANALYZER).setBody(getSerde().serialize(options)); + } + + protected InternalRequest deleteAnalyzerRequest(final String name, final AnalyzerDeleteOptions options) { + InternalRequest request = request(this.name, RequestType.DELETE, InternalArangoView.PATH_API_ANALYZER, name); + request.putQueryParam("force", options != null ? options.getForce() : null); + return request; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java b/core/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java new file mode 100644 index 000000000..2c035f435 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java @@ -0,0 +1,144 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.entity.EdgeEntity; +import com.arangodb.entity.EdgeUpdateEntity; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.internal.util.DocumentUtil; +import com.arangodb.internal.util.RequestUtils; +import com.arangodb.model.*; + +/** + * @author Mark Vollmary + */ +public abstract class InternalArangoEdgeCollection extends ArangoExecuteable { + + private static final String PATH_API_GHARIAL = "/_api/gharial"; + private static final String TRANSACTION_ID = "x-arango-trx-id"; + private static final String EDGE_PATH = "edge"; + private static final String EDGE_JSON_POINTER = "/edge"; + + private final String dbName; + private final String graphName; + private final String name; + + protected InternalArangoEdgeCollection(final ArangoExecuteable executeable, + final String dbName, + final String graphName, + final String name) { + super(executeable); + this.dbName = dbName; + this.graphName = graphName; + this.name = name; + } + + public String name() { + return name; + } + + @Deprecated + protected InternalRequest removeEdgeDefinitionRequest(final EdgeCollectionDropOptions options) { + return request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, "edge", name) + .putQueryParam("waitForSync", options.getWaitForSync()) + .putQueryParam("dropCollections", options.getDropCollections()); + } + + protected InternalRequest removeEdgeDefinitionRequest(final EdgeCollectionRemoveOptions options) { + return request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, "edge", name) + .putQueryParam("waitForSync", options.getWaitForSync()) + .putQueryParam("dropCollections", options.getDropCollections()); + } + + protected InternalRequest insertEdgeRequest(final T value, final EdgeCreateOptions options) { + 
final InternalRequest request = request(dbName, RequestType.POST, PATH_API_GHARIAL, graphName, EDGE_PATH, + name); + final EdgeCreateOptions params = (options != null ? options : new EdgeCreateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer insertEdgeResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), EDGE_JSON_POINTER, EdgeEntity.class); + } + + protected InternalRequest getEdgeRequest(final String key, final GraphDocumentReadOptions options) { + final InternalRequest request = request(dbName, RequestType.GET, PATH_API_GHARIAL, graphName, EDGE_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final GraphDocumentReadOptions params = (options != null ? options : new GraphDocumentReadOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + if (Boolean.TRUE.equals(params.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + return request; + } + + protected ResponseDeserializer getEdgeResponseDeserializer(final Class type) { + return (response) -> getSerde().deserializeUserData(getSerde().extract(response.getBody(), EDGE_JSON_POINTER), type); + } + + protected InternalRequest replaceEdgeRequest(final String key, final T value, final EdgeReplaceOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_GHARIAL, graphName, EDGE_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final EdgeReplaceOptions params = (options != null ? 
options : new EdgeReplaceOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer replaceEdgeResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), EDGE_JSON_POINTER, EdgeUpdateEntity.class); + } + + protected InternalRequest updateEdgeRequest(final String key, final T value, final EdgeUpdateOptions options) { + final InternalRequest request; + request = request(dbName, RequestType.PATCH, PATH_API_GHARIAL, graphName, EDGE_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final EdgeUpdateOptions params = (options != null ? options : new EdgeUpdateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.KEEP_NULL, params.getKeepNull()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer updateEdgeResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), EDGE_JSON_POINTER, EdgeUpdateEntity.class); + } + + protected InternalRequest deleteEdgeRequest(final String key, final EdgeDeleteOptions options) { + final InternalRequest request = request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, EDGE_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final EdgeDeleteOptions params = (options != null ? 
options : new EdgeDeleteOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + return request; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoGraph.java b/core/src/main/java/com/arangodb/internal/InternalArangoGraph.java new file mode 100644 index 000000000..45e4a7bd3 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoGraph.java @@ -0,0 +1,128 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.model.OptionsBuilder; +import com.arangodb.model.ReplaceEdgeDefinitionOptions; +import com.arangodb.model.VertexCollectionCreateOptions; + +import java.util.Collection; + +import static com.arangodb.internal.serde.SerdeUtils.constructListType; + +/** + * @author Mark Vollmary + */ +public abstract class InternalArangoGraph extends ArangoExecuteable { + + protected static final String PATH_API_GHARIAL = "/_api/gharial"; + private static final String GRAPH = "/graph"; + private static final String VERTEX = "vertex"; + private static final String EDGE = "edge"; + + protected final String dbName; + protected final String name; + + protected InternalArangoGraph(final ArangoExecuteable executeable, final String dbName, final String name) { + super(executeable); + this.dbName = dbName; + this.name = name; + } + + public String name() { + return name; + } + + protected InternalRequest dropRequest() { + return dropRequest(false); + } + + protected InternalRequest dropRequest(final boolean dropCollections) { + final InternalRequest request = request(dbName, RequestType.DELETE, PATH_API_GHARIAL, name); + if (dropCollections) { + request.putQueryParam("dropCollections", true); + } + return request; + } + + protected InternalRequest getInfoRequest() { + return request(dbName, RequestType.GET, PATH_API_GHARIAL, name); + } + + protected ResponseDeserializer getInfoResponseDeserializer() { + return addVertexCollectionResponseDeserializer(); + } + + protected InternalRequest getVertexCollectionsRequest() { + return request(dbName, RequestType.GET, PATH_API_GHARIAL, name, VERTEX); + } + + protected ResponseDeserializer> getVertexCollectionsResponseDeserializer() { + return (response) -> 
getSerde().deserialize(response.getBody(), "/collections", + constructListType(String.class)); + } + + protected InternalRequest addVertexCollectionRequest(final String name, final VertexCollectionCreateOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_GHARIAL, name(), VERTEX); + request.setBody(getSerde().serialize(OptionsBuilder.build(options, name))); + return request; + } + + protected ResponseDeserializer addVertexCollectionResponseDeserializer() { + return addEdgeDefinitionResponseDeserializer(); + } + + protected InternalRequest getEdgeDefinitionsRequest() { + return request(dbName, RequestType.GET, PATH_API_GHARIAL, name, EDGE); + } + + protected ResponseDeserializer> getEdgeDefinitionsDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), "/collections", + constructListType(String.class)); + } + + protected InternalRequest addEdgeDefinitionRequest(final EdgeDefinition definition) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_GHARIAL, name, EDGE); + request.setBody(getSerde().serialize(definition)); + return request; + } + + protected ResponseDeserializer addEdgeDefinitionResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), GRAPH, GraphEntity.class); + } + + protected InternalRequest replaceEdgeDefinitionRequest(final EdgeDefinition definition, final ReplaceEdgeDefinitionOptions options) { + final InternalRequest request = + request(dbName, RequestType.PUT, PATH_API_GHARIAL, name, EDGE, definition.getCollection()) + .putQueryParam("waitForSync", options.getWaitForSync()) + .putQueryParam("dropCollections", options.getDropCollections()); + request.setBody(getSerde().serialize(definition)); + return request; + } + + protected ResponseDeserializer replaceEdgeDefinitionResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), GRAPH, GraphEntity.class); + } + +} diff --git 
a/core/src/main/java/com/arangodb/internal/InternalArangoSearch.java b/core/src/main/java/com/arangodb/internal/InternalArangoSearch.java new file mode 100644 index 000000000..17209f83a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoSearch.java @@ -0,0 +1,52 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions; + +/** + * @author Mark Vollmary + */ +public class InternalArangoSearch extends InternalArangoView { + + private static final String PROPERTIES_PATH = "properties"; + + protected InternalArangoSearch(final ArangoExecuteable executeable, final String dbName, final String name) { + super(executeable, dbName, name); + } + + protected InternalRequest getPropertiesRequest() { + return request(dbName, RequestType.GET, PATH_API_VIEW, name, PROPERTIES_PATH); + } + + protected InternalRequest replacePropertiesRequest(final ArangoSearchPropertiesOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_VIEW, name, PROPERTIES_PATH); + request.setBody(getSerde().serialize(options != null ? 
options : new ArangoSearchPropertiesOptions())); + return request; + } + + protected InternalRequest updatePropertiesRequest(final ArangoSearchPropertiesOptions options) { + final InternalRequest request = request(dbName, RequestType.PATCH, PATH_API_VIEW, name, PROPERTIES_PATH); + request.setBody(getSerde().serialize(options != null ? options : new ArangoSearchPropertiesOptions())); + return request; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java b/core/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java new file mode 100644 index 000000000..991201661 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java @@ -0,0 +1,143 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; +import com.arangodb.internal.util.DocumentUtil; +import com.arangodb.internal.util.RequestUtils; +import com.arangodb.model.*; + +/** + * @author Mark Vollmary + */ +public abstract class InternalArangoVertexCollection extends ArangoExecuteable { + + private static final String PATH_API_GHARIAL = "/_api/gharial"; + private static final String VERTEX_PATH = "vertex"; + private static final String VERTEX_JSON_POINTER = "/vertex"; + private static final String TRANSACTION_ID = "x-arango-trx-id"; + + private final String dbName; + private final String graphName; + private final String name; + + protected InternalArangoVertexCollection(final ArangoExecuteable executeable, + final String dbName, + final String graphName, + final String name) { + super(executeable); + this.dbName = dbName; + this.graphName = graphName; + this.name = name; + } + + public String name() { + return name; + } + + @Deprecated + protected InternalRequest dropRequest(final VertexCollectionDropOptions options) { + return request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, VERTEX_PATH, name) + .putQueryParam("dropCollection", options.getDropCollection()); + } + + protected InternalRequest removeVertexCollectionRequest(final VertexCollectionRemoveOptions options) { + return request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, VERTEX_PATH, name) + .putQueryParam("dropCollection", options.getDropCollection()); + } + + protected InternalRequest insertVertexRequest(final T value, final VertexCreateOptions options) { + final InternalRequest request = request(dbName, RequestType.POST, PATH_API_GHARIAL, graphName, VERTEX_PATH, + name); + final VertexCreateOptions params = (options != null ? 
options : new VertexCreateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer insertVertexResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), VERTEX_JSON_POINTER, VertexEntity.class); + } + + protected InternalRequest getVertexRequest(final String key, final GraphDocumentReadOptions options) { + final InternalRequest request = request(dbName, RequestType.GET, PATH_API_GHARIAL, graphName, VERTEX_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final GraphDocumentReadOptions params = (options != null ? options : new GraphDocumentReadOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putHeaderParam(ArangoRequestParam.IF_NONE_MATCH, params.getIfNoneMatch()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + if (Boolean.TRUE.equals(params.getAllowDirtyRead())) { + RequestUtils.allowDirtyRead(request); + } + return request; + } + + protected ResponseDeserializer getVertexResponseDeserializer(final Class type) { + return (response) -> getSerde().deserializeUserData(getSerde().extract(response.getBody(), VERTEX_JSON_POINTER), type); + } + + protected InternalRequest replaceVertexRequest(final String key, final T value, final VertexReplaceOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_GHARIAL, graphName, VERTEX_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final VertexReplaceOptions params = (options != null ? 
options : new VertexReplaceOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer replaceVertexResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), VERTEX_JSON_POINTER, VertexUpdateEntity.class); + } + + protected InternalRequest updateVertexRequest(final String key, final T value, final VertexUpdateOptions options) { + final InternalRequest request; + request = request(dbName, RequestType.PATCH, PATH_API_GHARIAL, graphName, VERTEX_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final VertexUpdateOptions params = (options != null ? options : new VertexUpdateOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.KEEP_NULL, params.getKeepNull()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + request.setBody(getSerde().serializeUserData(value)); + return request; + } + + protected ResponseDeserializer updateVertexResponseDeserializer() { + return (response) -> getSerde().deserialize(response.getBody(), VERTEX_JSON_POINTER, VertexUpdateEntity.class); + } + + protected InternalRequest deleteVertexRequest(final String key, final VertexDeleteOptions options) { + final InternalRequest request = request(dbName, RequestType.DELETE, PATH_API_GHARIAL, graphName, + VERTEX_PATH, + DocumentUtil.createDocumentHandle(name, key)); + final VertexDeleteOptions params = (options != null ? 
options : new VertexDeleteOptions()); + request.putHeaderParam(TRANSACTION_ID, params.getStreamTransactionId()); + request.putQueryParam(ArangoRequestParam.WAIT_FOR_SYNC, params.getWaitForSync()); + request.putHeaderParam(ArangoRequestParam.IF_MATCH, params.getIfMatch()); + return request; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoView.java b/core/src/main/java/com/arangodb/internal/InternalArangoView.java new file mode 100644 index 000000000..9ab3cffab --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalArangoView.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.model.OptionsBuilder; +import com.arangodb.model.ViewRenameOptions; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public abstract class InternalArangoView extends ArangoExecuteable { + + protected static final String PATH_API_VIEW = "/_api/view"; + protected static final String PATH_API_ANALYZER = "/_api/analyzer"; + + protected final String dbName; + protected final String name; + + protected InternalArangoView(final ArangoExecuteable executeable, + final String dbName, + final String name) { + super(executeable); + this.dbName = dbName; + this.name = name; + } + + public String name() { + return name; + } + + protected InternalRequest dropRequest() { + return request(dbName, RequestType.DELETE, PATH_API_VIEW, name); + } + + protected InternalRequest renameRequest(final String newName) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_VIEW, name, "rename"); + request.setBody(getSerde().serialize(OptionsBuilder.build(new ViewRenameOptions(), newName))); + return request; + } + + protected InternalRequest getInfoRequest() { + return request(dbName, RequestType.GET, PATH_API_VIEW, name); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalRequest.java b/core/src/main/java/com/arangodb/internal/InternalRequest.java new file mode 100644 index 000000000..f219c7723 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalRequest.java @@ -0,0 +1,149 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.arch.UsedInApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public class InternalRequest { + + private final String dbName; + private final RequestType requestType; + private final String path; + private final Map queryParam; + private final Map headerParam; + private int version = 1; + private int type = 1; + private byte[] body; + + public InternalRequest(final String dbName, final RequestType requestType, final String path) { + super(); + this.dbName = dbName; + this.requestType = requestType; + this.path = path; + body = null; + queryParam = new HashMap<>(); + headerParam = new HashMap<>(); + } + + public int getVersion() { + return version; + } + + public InternalRequest setVersion(final int version) { + this.version = version; + return this; + } + + public int getType() { + return type; + } + + public InternalRequest setType(final int type) { + this.type = type; + return this; + } + + public String getDbName() { + return dbName; + } + + public RequestType getRequestType() { + return requestType; + } + + public String getPath() { + return path; + } + + public Map getQueryParam() { + return queryParam; + } + + public InternalRequest putQueryParam(final String key, final Object value) { + if (value != null) { + queryParam.put(key, value.toString()); + } + return this; + } + + public InternalRequest 
putQueryParams(final Map params) { + if (params != null) { + for (Map.Entry it : params.entrySet()) { + putQueryParam(it.getKey(), it.getValue()); + } + } + return this; + } + + public Map getHeaderParam() { + return Collections.unmodifiableMap(headerParam); + } + + public boolean containsHeaderParam(final String key) { + return headerParam.containsKey(key.toLowerCase(Locale.ROOT)); + } + + public InternalRequest putHeaderParam(final String key, final String value) { + if (value != null) { + headerParam.put(key.toLowerCase(Locale.ROOT), value); + } + return this; + } + + public InternalRequest putHeaderParams(final Map params) { + if (params != null) { + for (Map.Entry it : params.entrySet()) { + putHeaderParam(it.getKey(), it.getValue()); + } + } + return this; + } + + public byte[] getBody() { + return body; + } + + public InternalRequest setBody(final byte[] body) { + this.body = body; + return this; + } + + @Override + public String toString() { + return "{" + + "requestType=" + requestType + + ", database='" + dbName + '\'' + + ", url='" + path + '\'' + + ", parameters=" + queryParam + + ", headers=" + headerParam + + '}'; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/InternalResponse.java b/core/src/main/java/com/arangodb/internal/InternalResponse.java new file mode 100644 index 000000000..9e57b697d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalResponse.java @@ -0,0 +1,108 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.arch.UsedInApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public class InternalResponse { + + private int version = 1; + private int type = 2; + private int responseCode; + private final Map meta; + private byte[] body = null; + + public InternalResponse() { + super(); + meta = new HashMap<>(); + } + + public int getVersion() { + return version; + } + + public void setVersion(final int version) { + this.version = version; + } + + public int getType() { + return type; + } + + public void setType(final int type) { + this.type = type; + } + + public int getResponseCode() { + return responseCode; + } + + public void setResponseCode(final int responseCode) { + this.responseCode = responseCode; + } + + public Map getMeta() { + return Collections.unmodifiableMap(meta); + } + + public String getMeta(final String key) { + return meta.get(key.toLowerCase(Locale.ROOT)); + } + + public boolean containsMeta(final String key) { + return meta.containsKey(key.toLowerCase(Locale.ROOT)); + } + + public void putMeta(final String key, final String value) { + this.meta.put(key.toLowerCase(Locale.ROOT), value); + } + + public void putMetas(final Map meta) { + for (Map.Entry it : meta.entrySet()) { + putMeta(it.getKey(), it.getValue()); + } + } + + public byte[] getBody() { + return body; + } + + public void setBody(final byte[] body) { + this.body = body; + } + + @Override + public String toString() { + return "{" + + "statusCode=" + responseCode + + ", headers=" + meta + + '}'; + } +} diff --git a/core/src/main/java/com/arangodb/internal/InternalSearchAlias.java b/core/src/main/java/com/arangodb/internal/InternalSearchAlias.java new file mode 
100644 index 000000000..a2fd238e5 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/InternalSearchAlias.java @@ -0,0 +1,49 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.model.arangosearch.SearchAliasPropertiesOptions; + +public class InternalSearchAlias extends InternalArangoView { + + private static final String PROPERTIES_PATH = "properties"; + + protected InternalSearchAlias(final ArangoExecuteable executeable, final String dbName, final String name) { + super(executeable, dbName, name); + } + + protected InternalRequest getPropertiesRequest() { + return request(dbName, RequestType.GET, PATH_API_VIEW, name, PROPERTIES_PATH); + } + + protected InternalRequest replacePropertiesRequest(final SearchAliasPropertiesOptions options) { + final InternalRequest request = request(dbName, RequestType.PUT, PATH_API_VIEW, name, PROPERTIES_PATH); + request.setBody(getSerde().serialize(options != null ? options : new SearchAliasPropertiesOptions())); + return request; + } + + protected InternalRequest updatePropertiesRequest(final SearchAliasPropertiesOptions options) { + final InternalRequest request = request(dbName, RequestType.PATCH, PATH_API_VIEW, name, PROPERTIES_PATH); + request.setBody(getSerde().serialize(options != null ? 
options : new SearchAliasPropertiesOptions())); + return request; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/QueueTimeMetricsImpl.java b/core/src/main/java/com/arangodb/internal/QueueTimeMetricsImpl.java new file mode 100644 index 000000000..c3ac91a19 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/QueueTimeMetricsImpl.java @@ -0,0 +1,134 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.QueueTimeMetrics; +import com.arangodb.model.QueueTimeSample; + +import java.util.Arrays; + +/** + * @author Michele Rastelli + */ +public class QueueTimeMetricsImpl implements QueueTimeMetrics { + private final CircularFifoQueue samples; + + public QueueTimeMetricsImpl(int queueSize) { + samples = new CircularFifoQueue(queueSize); + } + + @Override + public QueueTimeSample[] getValues() { + return samples.getElements(); + } + + @Override + public double getAvg() { + return samples.getAvg(); + } + + void add(double value) { + add(new QueueTimeSample(System.currentTimeMillis(), value)); + } + + void add(QueueTimeSample value) { + samples.add(value); + } + + void clear() { + samples.clear(); + } + + private static class CircularFifoQueue { + private final QueueTimeSample[] elements; + /** + * Capacity of the queue. 
+ */ + private final int size; + /** + * Array index of the oldest queue element. + */ + private int start; + /** + * Amount of elements in the queue. + */ + private int count; + + /** + * Sum of the values in the queue. + */ + private double sum; + + CircularFifoQueue(final int size) { + elements = new QueueTimeSample[size]; + this.size = elements.length; + clear(); + } + + /** + * @return the average of the values in the queue, 0.0 if the queue is empty. + */ + synchronized double getAvg() { + if (count == 0) return 0.0; + return sum / count; + } + + synchronized void clear() { + start = 0; + count = 0; + sum = 0.0; + Arrays.fill(elements, null); + } + + /** + * Adds the given element to this queue. If the queue is full, the least recently added + * element is replaced with the given one. + * + * @param element the element to add + */ + synchronized void add(final QueueTimeSample element) { + if (count < size) { + count++; + } + QueueTimeSample overridden = elements[start]; + if (overridden != null) { + sum -= overridden.value; + } + elements[start++] = element; + if (start >= size) { + start = 0; + } + sum += element.value; + } + + synchronized QueueTimeSample[] getElements() { + QueueTimeSample[] out = new QueueTimeSample[count]; + if (count < size) { + System.arraycopy(elements, 0, out, 0, count); + } else { + System.arraycopy(elements, start, out, 0, size - start); + System.arraycopy(elements, 0, out, size - start, start); + } + return out; + } + + } +} diff --git a/core/src/main/java/com/arangodb/internal/RequestContextHolder.java b/core/src/main/java/com/arangodb/internal/RequestContextHolder.java new file mode 100644 index 000000000..bde22f031 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/RequestContextHolder.java @@ -0,0 +1,52 @@ +package com.arangodb.internal; + +import com.arangodb.RequestContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Objects; +import java.util.function.Supplier; + +public enum 
RequestContextHolder { + INSTANCE; + + private final static Logger LOGGER = LoggerFactory.getLogger(RequestContextHolder.class); + + private final ThreadLocal runningWithinCtx = ThreadLocal.withInitial(() -> false); + private final ThreadLocal ctx = ThreadLocal.withInitial(() -> RequestContext.EMPTY); + + public T runWithCtx(RequestContext requestContext, Supplier fun) { + Objects.requireNonNull(requestContext); + RequestContext old = null; + try { + if (runningWithinCtx.get()) { + // re-entrant invocation, keep track of old ctx to restore later + old = ctx.get(); + } + LOGGER.debug("setting RequestContext: {}", requestContext); + ctx.set(requestContext); + runningWithinCtx.set(true); + return fun.get(); + } finally { + if (old == null) { + LOGGER.debug("removing RequestContext"); + ctx.remove(); + runningWithinCtx.remove(); + } else { + // re-entrant invocation, restore old ctx + LOGGER.debug("restore RequestContext: {}", old); + ctx.set(old); + } + } + } + + public RequestContext getCtx() { + if (!runningWithinCtx.get()) { + throw new IllegalStateException("Not within ctx!"); + } + + RequestContext requestContext = ctx.get(); + LOGGER.debug("returning RequestContext: {}", requestContext); + return requestContext; + } +} diff --git a/core/src/main/java/com/arangodb/internal/RequestContextImpl.java b/core/src/main/java/com/arangodb/internal/RequestContextImpl.java new file mode 100644 index 000000000..dfd7e3a3e --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/RequestContextImpl.java @@ -0,0 +1,31 @@ +package com.arangodb.internal; + +import com.arangodb.RequestContext; + +import java.util.Optional; + +public class RequestContextImpl implements RequestContext { + private static final String TRANSACTION_ID = "x-arango-trx-id"; + + private final String streamTransactionId; + + public RequestContextImpl() { + this.streamTransactionId = null; + } + + public RequestContextImpl(InternalRequest request) { + this.streamTransactionId = 
request.getHeaderParam().get(TRANSACTION_ID); + } + + @Override + public Optional getStreamTransactionId() { + return Optional.ofNullable(streamTransactionId); + } + + @Override + public String toString() { + return "RequestContextImpl{" + + "streamTransactionId='" + streamTransactionId + '\'' + + '}'; + } +} diff --git a/core/src/main/java/com/arangodb/internal/RequestType.java b/core/src/main/java/com/arangodb/internal/RequestType.java new file mode 100644 index 000000000..5b9d27aae --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/RequestType.java @@ -0,0 +1,72 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.Request; + +/** + * @author Mark Vollmary + */ +public enum RequestType { + + DELETE(0), + GET(1), + POST(2), + PUT(3), + HEAD(4), + PATCH(5), + OPTIONS(6), + VSTREAM_CRED(7), + VSTREAM_REGISTER(8), + VSTREAM_STATUS(9), + ILLEGAL(10); + + private final int type; + + RequestType(final int type) { + this.type = type; + } + + public static RequestType from(final Request.Method method) { + switch (method) { + case DELETE: + return DELETE; + case GET: + return GET; + case POST: + return POST; + case PUT: + return PUT; + case HEAD: + return HEAD; + case PATCH: + return PATCH; + case OPTIONS: + return OPTIONS; + default: + throw new IllegalArgumentException(); + } + } + + public int getType() { + return type; + } +} diff --git a/core/src/main/java/com/arangodb/internal/SearchAliasAsyncImpl.java b/core/src/main/java/com/arangodb/internal/SearchAliasAsyncImpl.java new file mode 100644 index 000000000..fe2763e89 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/SearchAliasAsyncImpl.java @@ -0,0 +1,108 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.*; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasPropertiesOptions; + +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Michele Rastelli + */ +public class SearchAliasAsyncImpl extends InternalSearchAlias implements SearchAliasAsync { + private final ArangoDatabaseAsync db; + + protected SearchAliasAsyncImpl(final ArangoDatabaseAsyncImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabaseAsync db() { + return db; + } + + @Override + public CompletableFuture exists() { + return getInfo() + .thenApply(Objects::nonNull) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + if (matches(aEx, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + } + throw ArangoDBException.of(e); + }); + } + + @Override + public CompletableFuture drop() { + return executorAsync().execute(this::dropRequest, Void.class); + } + + @Override + public CompletableFuture rename(final String newName) { + return executorAsync().execute(() -> renameRequest(newName), ViewEntity.class); + } + + @Override + public CompletableFuture getInfo() { + return executorAsync().execute(this::getInfoRequest, ViewEntity.class); + } + + @Override + public CompletableFuture create() { + return create(new SearchAliasCreateOptions()); + } + + @Override + public CompletableFuture create(final SearchAliasCreateOptions options) { + return db().createSearchAlias(name(), options); + } + + @Override + public CompletableFuture getProperties() { + return executorAsync().execute(this::getPropertiesRequest, SearchAliasPropertiesEntity.class); + } + + @Override + public CompletableFuture updateProperties(final SearchAliasPropertiesOptions options) { + return executorAsync().execute(() -> updatePropertiesRequest(options), SearchAliasPropertiesEntity.class); + } + + @Override + public CompletableFuture replaceProperties(final SearchAliasPropertiesOptions options) { + return executorAsync().execute(() -> replacePropertiesRequest(options), SearchAliasPropertiesEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/SearchAliasImpl.java b/core/src/main/java/com/arangodb/internal/SearchAliasImpl.java new file mode 100644 index 000000000..88f24987f --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/SearchAliasImpl.java @@ -0,0 +1,103 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabase; +import com.arangodb.SearchAlias; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity; +import com.arangodb.model.arangosearch.SearchAliasCreateOptions; +import com.arangodb.model.arangosearch.SearchAliasPropertiesOptions; + +import static com.arangodb.internal.ArangoErrors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND; +import static com.arangodb.internal.ArangoErrors.matches; + +/** + * @author Michele Rastelli + */ +public class SearchAliasImpl extends InternalSearchAlias implements SearchAlias { + private final ArangoDatabase db; + + protected SearchAliasImpl(final ArangoDatabaseImpl db, final String name) { + super(db, db.name(), name); + this.db = db; + } + + @Override + public ArangoDatabase db() { + return db; + } + + @Override + public boolean exists() { + try { + getInfo(); + return true; + } catch (final ArangoDBException e) { + if (matches(e, 404, ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)) { + return false; + } + throw e; + } + } + + @Override + public void drop() { + executorSync().execute(dropRequest(), Void.class); + } + + @Override + public ViewEntity rename(final String newName) { + return executorSync().execute(renameRequest(newName), ViewEntity.class); + } + + @Override + public ViewEntity getInfo() { + return executorSync().execute(getInfoRequest(), ViewEntity.class); + } + + @Override + public ViewEntity 
create() { + return create(new SearchAliasCreateOptions()); + } + + @Override + public ViewEntity create(final SearchAliasCreateOptions options) { + return db().createSearchAlias(name(), options); + } + + @Override + public SearchAliasPropertiesEntity getProperties() { + return executorSync().execute(getPropertiesRequest(), SearchAliasPropertiesEntity.class); + } + + @Override + public SearchAliasPropertiesEntity updateProperties(final SearchAliasPropertiesOptions options) { + return executorSync().execute(updatePropertiesRequest(options), SearchAliasPropertiesEntity.class); + } + + @Override + public SearchAliasPropertiesEntity replaceProperties(final SearchAliasPropertiesOptions options) { + return executorSync().execute(replacePropertiesRequest(options), SearchAliasPropertiesEntity.class); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/ShadedProxy.java b/core/src/main/java/com/arangodb/internal/ShadedProxy.java new file mode 100644 index 000000000..75a94d294 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/ShadedProxy.java @@ -0,0 +1,124 @@ +package com.arangodb.internal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.util.*; + +public class ShadedProxy { + private static final Logger LOG = LoggerFactory.getLogger(ShadedProxy.class); + private static final ClassLoader classLoader = ShadedProxy.class.getClassLoader(); + + @SuppressWarnings("unchecked") + public static T of(Class i, Object target) { + return (T) Proxy.newProxyInstance( + classLoader, + new Class[]{i}, + new ShadedInvocationHandler(i, target)); + } + + public static Optional getTarget(Object o) { + if (Proxy.isProxyClass(o.getClass())) { + InvocationHandler h = Proxy.getInvocationHandler(o); + if (h instanceof ShadedInvocationHandler) { + return Optional.of(((ShadedInvocationHandler) h).target); + } + } + return Optional.empty(); + 
} + + private static class ShadedInvocationHandler implements InvocationHandler { + private final Map targetMethods = new HashMap<>(); + private final Map> proxiedReturnTypes = new HashMap<>(); + private final Object target; + + ShadedInvocationHandler(Class i, Object target) { + this.target = target; + Map iMethods = new HashMap<>(); + for (Method method : i.getDeclaredMethods()) { + iMethods.put(new ProxyMethod(method), method); + } + + Method[] methods; + if (target instanceof Class) { + // proxy for static methods + methods = ((Class) target).getMethods(); + } else { + methods = target.getClass().getMethods(); + } + + for (Method method : methods) { + ProxyMethod pm = new ProxyMethod(method); + Method iMethod = iMethods.get(pm); + if (iMethod != null) { + LOG.trace("adding {}", iMethod); + targetMethods.put(pm, method); + Class mRet = method.getReturnType(); + Class iRet = iMethod.getReturnType(); + if (!mRet.equals(iRet)) { + LOG.trace("adding proxied return type {}", iRet); + proxiedReturnTypes.put(pm, iRet); + } + } + } + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Exception { + ProxyMethod pm = new ProxyMethod(method); + Method targetMethod = targetMethods.get(pm); + LOG.trace("Proxying invocation \n\t of: {} \n\t to: {}", method, targetMethod); + Class returnProxy = proxiedReturnTypes.get(pm); + Object[] realArgs; + if (args == null) { + realArgs = null; + } else { + realArgs = new Object[args.length]; + for (int i = 0; i < args.length; i++) { + realArgs[i] = ShadedProxy.getTarget(args[i]).orElse(args[i]); + } + } + Object res = targetMethod.invoke(target, realArgs); + if (returnProxy != null) { + LOG.trace("proxying return type \n\t of: {} \n\t to: {}", targetMethod.getReturnType(), returnProxy); + return ShadedProxy.of(returnProxy, res); + } else { + return res; + } + } + + private static class ProxyMethod { + private final String name; + private final String simpleReturnType; + private final String[] 
simpleParameterTypes; + + public ProxyMethod(Method method) { + name = method.getName(); + simpleReturnType = method.getReturnType().getSimpleName(); + simpleParameterTypes = new String[method.getParameterTypes().length]; + for (int i = 0; i < method.getParameterTypes().length; i++) { + simpleParameterTypes[i] = method.getParameterTypes()[i].getSimpleName(); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProxyMethod that = (ProxyMethod) o; + return Objects.equals(name, that.name) && Objects.equals(simpleReturnType, that.simpleReturnType) && Arrays.equals(simpleParameterTypes, that.simpleParameterTypes); + } + + @Override + public int hashCode() { + int result = Objects.hash(name, simpleReturnType); + result = 31 * result + Arrays.hashCode(simpleParameterTypes); + return result; + } + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java b/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java new file mode 100644 index 000000000..a13d41cdb --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java @@ -0,0 +1,393 @@ +package com.arangodb.internal.config; + +import com.arangodb.Compression; +import com.arangodb.Protocol; +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.HostDescription; +import com.arangodb.config.ProtocolConfig; +import com.arangodb.entity.LoadBalancingStrategy; +import com.arangodb.internal.ArangoDefaults; +import com.arangodb.internal.serde.ContentTypeFactory; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.ArangoSerdeProvider; +import com.fasterxml.jackson.databind.Module; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import 
java.io.ByteArrayInputStream; +import java.lang.reflect.InvocationTargetException; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.util.*; +import java.util.concurrent.Executor; +import java.util.stream.Collectors; + +@UsedInApi +public class ArangoConfig { + private final List hosts = new ArrayList<>(); + private Protocol protocol; + private Integer timeout; + private String user; + private String password; + private String jwt; + private Boolean useSsl; + private Optional sslCertValue; + private Optional sslAlgorithm; + private String sslProtocol; + private SSLContext sslContext; + private Boolean verifyHost; + private Integer chunkSize; + private Boolean pipelining; + private Integer maxConnections; + private Long connectionTtl; + private Integer keepAliveInterval; + private Boolean acquireHostList; + private Integer acquireHostListInterval; + private LoadBalancingStrategy loadBalancingStrategy; + private InternalSerde internalSerde; + private ArangoSerde userDataSerde; + private Class serdeProviderClass; + private Integer responseQueueTimeSamples; + private Module protocolModule; + private Executor asyncExecutor; + private Compression compression; + private Integer compressionThreshold; + private Integer compressionLevel; + private ProtocolConfig protocolConfig; + + public ArangoConfig() { + // load default properties + loadProperties(new ArangoConfigProperties() { + }); + } + + public void loadProperties(final ArangoConfigProperties properties) { + hosts.addAll(properties.getHosts().orElse(ArangoDefaults.DEFAULT_HOSTS).stream() + .map(it -> new HostDescription(it.getHost(), it.getPort())) + .collect(Collectors.toList())); + protocol = properties.getProtocol().orElse(ArangoDefaults.DEFAULT_PROTOCOL); + timeout = properties.getTimeout().orElse(ArangoDefaults.DEFAULT_TIMEOUT); + user = properties.getUser().orElse(ArangoDefaults.DEFAULT_USER); + // FIXME: make password field 
Optional + password = properties.getPassword().orElse(null); + // FIXME: make jwt field Optional + jwt = properties.getJwt().orElse(null); + useSsl = properties.getUseSsl().orElse(ArangoDefaults.DEFAULT_USE_SSL); + sslCertValue = properties.getSslCertValue(); + sslAlgorithm = properties.getSslAlgorithm(); + sslProtocol = properties.getSslProtocol().orElse(ArangoDefaults.DEFAULT_SSL_PROTOCOL); + verifyHost = properties.getVerifyHost().orElse(ArangoDefaults.DEFAULT_VERIFY_HOST); + chunkSize = properties.getChunkSize().orElse(ArangoDefaults.DEFAULT_CHUNK_SIZE); + pipelining = properties.getPipelining().orElse(ArangoDefaults.DEFAULT_PIPELINING); + // FIXME: make maxConnections field Optional + maxConnections = properties.getMaxConnections().orElse(null); + // FIXME: make connectionTtl field Optional + connectionTtl = properties.getConnectionTtl().orElse(null); + // FIXME: make keepAliveInterval field Optional + keepAliveInterval = properties.getKeepAliveInterval().orElse(null); + acquireHostList = properties.getAcquireHostList().orElse(ArangoDefaults.DEFAULT_ACQUIRE_HOST_LIST); + acquireHostListInterval = properties.getAcquireHostListInterval().orElse(ArangoDefaults.DEFAULT_ACQUIRE_HOST_LIST_INTERVAL); + loadBalancingStrategy = properties.getLoadBalancingStrategy().orElse(ArangoDefaults.DEFAULT_LOAD_BALANCING_STRATEGY); + responseQueueTimeSamples = properties.getResponseQueueTimeSamples().orElse(ArangoDefaults.DEFAULT_RESPONSE_QUEUE_TIME_SAMPLES); + compression = properties.getCompression().orElse(ArangoDefaults.DEFAULT_COMPRESSION); + compressionThreshold = properties.getCompressionThreshold().orElse(ArangoDefaults.DEFAULT_COMPRESSION_THRESHOLD); + compressionLevel = properties.getCompressionLevel().orElse(ArangoDefaults.DEFAULT_COMPRESSION_LEVEL); + serdeProviderClass = properties.getSerdeProviderClass().map((String className) -> { + try { + //noinspection unchecked + return (Class) Class.forName(className); + } catch (ClassNotFoundException e) { + throw new 
RuntimeException(e); + } + }).orElse(null); + } + + public List getHosts() { + return hosts; + } + + public void addHost(HostDescription hostDescription) { + hosts.add(hostDescription); + } + + public Protocol getProtocol() { + return protocol; + } + + public void setProtocol(Protocol protocol) { + this.protocol = protocol; + } + + public Integer getTimeout() { + return timeout; + } + + public void setTimeout(Integer timeout) { + this.timeout = timeout; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getJwt() { + return jwt; + } + + public void setJwt(String jwt) { + this.jwt = jwt; + } + + public Boolean getUseSsl() { + return useSsl; + } + + public void setUseSsl(Boolean useSsl) { + this.useSsl = useSsl; + } + + public void setSslCertValue(String sslCertValue) { + this.sslCertValue = Optional.ofNullable(sslCertValue); + } + + public void setSslAlgorithm(String sslAlgorithm) { + this.sslAlgorithm = Optional.ofNullable(sslAlgorithm); + } + + public void setSslProtocol(String sslProtocol) { + this.sslProtocol = sslProtocol; + } + + public SSLContext getSslContext() { + if (sslContext == null) { + sslContext = createSslContext(); + } + return sslContext; + } + + public void setSslContext(SSLContext sslContext) { + this.sslContext = sslContext; + } + + public Boolean getVerifyHost() { + return verifyHost; + } + + public void setVerifyHost(Boolean verifyHost) { + this.verifyHost = verifyHost; + } + + public Integer getChunkSize() { + return chunkSize; + } + + public void setChunkSize(Integer chunkSize) { + this.chunkSize = chunkSize; + } + + public Boolean getPipelining() { + return pipelining; + } + + public void setPipelining(Boolean pipelining) { + this.pipelining = pipelining; + } + + public Integer getMaxConnections() { + if (maxConnections == 
null) { + maxConnections = getDefaultMaxConnections(); + } + return maxConnections; + } + + private int getDefaultMaxConnections() { + int defaultMaxConnections; + switch (getProtocol()) { + case VST: + defaultMaxConnections = ArangoDefaults.MAX_CONNECTIONS_VST_DEFAULT; + break; + case HTTP_JSON: + case HTTP_VPACK: + defaultMaxConnections = ArangoDefaults.MAX_CONNECTIONS_HTTP_DEFAULT; + break; + case HTTP2_JSON: + case HTTP2_VPACK: + defaultMaxConnections = ArangoDefaults.MAX_CONNECTIONS_HTTP2_DEFAULT; + break; + default: + throw new IllegalArgumentException(); + } + return defaultMaxConnections; + } + + public void setMaxConnections(Integer maxConnections) { + this.maxConnections = maxConnections; + } + + public Long getConnectionTtl() { + if (connectionTtl == null && getProtocol() != Protocol.VST) { + connectionTtl = ArangoDefaults.DEFAULT_CONNECTION_TTL_HTTP; + } + return connectionTtl; + } + + public void setConnectionTtl(Long connectionTtl) { + this.connectionTtl = connectionTtl; + } + + public Integer getKeepAliveInterval() { + return keepAliveInterval; + } + + public void setKeepAliveInterval(Integer keepAliveInterval) { + this.keepAliveInterval = keepAliveInterval; + } + + public Boolean getAcquireHostList() { + return acquireHostList; + } + + public void setAcquireHostList(Boolean acquireHostList) { + this.acquireHostList = acquireHostList; + } + + public Integer getAcquireHostListInterval() { + return acquireHostListInterval; + } + + public void setAcquireHostListInterval(Integer acquireHostListInterval) { + this.acquireHostListInterval = acquireHostListInterval; + } + + public LoadBalancingStrategy getLoadBalancingStrategy() { + return loadBalancingStrategy; + } + + public void setLoadBalancingStrategy(LoadBalancingStrategy loadBalancingStrategy) { + this.loadBalancingStrategy = loadBalancingStrategy; + } + + public Class getSerdeProviderClass() { + return serdeProviderClass; + } + + public ArangoSerde getUserDataSerde() { + if (userDataSerde != null) { 
+ return userDataSerde; + } else if (serdeProviderClass != null) { + try { + return serdeProviderClass.getDeclaredConstructor().newInstance().create(); + } catch (InstantiationException | IllegalAccessException | InvocationTargetException | + NoSuchMethodException e) { + throw new RuntimeException(e); + } + } else { + return ArangoSerdeProvider.of(ContentTypeFactory.of(getProtocol())).create(); + } + } + + public InternalSerde getInternalSerde() { + if (internalSerde == null) { + internalSerde = new InternalSerdeProvider(ContentTypeFactory.of(getProtocol())).create(getUserDataSerde(), protocolModule); + } + return internalSerde; + } + + public void setUserDataSerde(ArangoSerde userDataSerde) { + this.userDataSerde = userDataSerde; + } + + public void setUserDataSerdeProvider(Class serdeProviderClass) { + this.serdeProviderClass = serdeProviderClass; + } + + public Integer getResponseQueueTimeSamples() { + return responseQueueTimeSamples; + } + + public void setResponseQueueTimeSamples(Integer responseQueueTimeSamples) { + this.responseQueueTimeSamples = responseQueueTimeSamples; + } + + public void setProtocolModule(Module m) { + protocolModule = m; + } + + public Executor getAsyncExecutor() { + return asyncExecutor; + } + + public void setAsyncExecutor(Executor asyncExecutor) { + this.asyncExecutor = asyncExecutor; + } + + public Compression getCompression() { + return compression; + } + + public void setCompression(Compression compression) { + this.compression = compression; + } + + public Integer getCompressionThreshold() { + return compressionThreshold; + } + + public void setCompressionThreshold(Integer compressionThreshold) { + this.compressionThreshold = compressionThreshold; + } + + public Integer getCompressionLevel() { + return compressionLevel; + } + + public void setCompressionLevel(Integer compressionLevel) { + this.compressionLevel = compressionLevel; + } + + public ProtocolConfig getProtocolConfig() { + return protocolConfig; + } + + public void 
setProtocolConfig(ProtocolConfig protocolConfig) { + this.protocolConfig = protocolConfig; + } + + private SSLContext createSslContext() { + try { + if (sslCertValue.isPresent()) { + ByteArrayInputStream is = new ByteArrayInputStream(Base64.getDecoder().decode(sslCertValue.get())); + Certificate cert = CertificateFactory.getInstance("X.509").generateCertificate(is); + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null); + ks.setCertificateEntry("arangodb", cert); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(sslAlgorithm.orElseGet(TrustManagerFactory::getDefaultAlgorithm)); + tmf.init(ks); + SSLContext sc = SSLContext.getInstance(sslProtocol); + sc.init(null, tmf.getTrustManagers(), null); + return sc; + } else { + return SSLContext.getDefault(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java b/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java new file mode 100644 index 000000000..c1eadb402 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java @@ -0,0 +1,204 @@ +package com.arangodb.internal.config; + +import com.arangodb.ArangoDBException; +import com.arangodb.Compression; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.HostDescription; +import com.arangodb.entity.LoadBalancingStrategy; + +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.Properties; + +/** + * ArangoConfigProperties implementation that reads configuration entries from local file. Properties path prefix can be + * configured, so that it is possible to distinguish configurations for multiple driver instances in the same file. 
+ */ +public final class ArangoConfigPropertiesImpl implements ArangoConfigProperties { + private static final String DEFAULT_PREFIX = "arangodb"; + private static final String DEFAULT_PROPERTY_FILE = "arangodb.properties"; + private final Properties properties; + private final String prefix; + + public ArangoConfigPropertiesImpl() { + this(DEFAULT_PROPERTY_FILE, DEFAULT_PREFIX); + } + + public ArangoConfigPropertiesImpl(final String fileName) { + this(fileName, DEFAULT_PREFIX); + } + + public ArangoConfigPropertiesImpl(final String fileName, final String prefix) { + this(initProperties(fileName), prefix); + } + + public ArangoConfigPropertiesImpl(final Properties properties) { + this(properties, DEFAULT_PREFIX); + } + + public ArangoConfigPropertiesImpl(final Properties properties, final String prefix) { + this.properties = properties; + this.prefix = initPrefix(prefix); + } + + private static Properties initProperties(String fileName) { + Properties p = new Properties(); + try (InputStream is = ArangoConfigPropertiesImpl.class.getClassLoader().getResourceAsStream(fileName)) { + p.load(is); + } catch (Exception e) { + throw ArangoDBException.of("Got exception while reading properties file " + fileName, e); + } + return p; + } + + private String initPrefix(String p) { + if (p == null) { + return ""; + } else { + return p + "."; + } + } + + private String getProperty(String key) { + return properties.getProperty(prefix + key); + } + + @Override + public Optional> getHosts() { + return Optional.ofNullable(getProperty(KEY_HOSTS)) + .map(s -> { + List hostDescriptions = new ArrayList<>(); + String[] hosts = s.split(","); + for (String host : hosts) { + hostDescriptions.add(HostDescription.parse(host)); + } + return hostDescriptions; + }); + } + + @Override + public Optional getProtocol() { + return Optional.ofNullable(getProperty(KEY_PROTOCOL)).map(Protocol::valueOf); + } + + @Override + public Optional getUser() { + return Optional.ofNullable(getProperty(KEY_USER)); + 
} + + @Override + public Optional getPassword() { + return Optional.ofNullable(getProperty(KEY_PASSWORD)); + } + + @Override + public Optional getJwt() { + return Optional.ofNullable(getProperty(KEY_JWT)); + } + + @Override + public Optional getTimeout() { + return Optional.ofNullable(getProperty(KEY_TIMEOUT)).map(Integer::valueOf); + } + + @Override + public Optional getUseSsl() { + return Optional.ofNullable(getProperty(KEY_USE_SSL)).map(Boolean::valueOf); + } + + @Override + public Optional getSslCertValue() { + return Optional.ofNullable(getProperty(KEY_SSL_CERT_VALUE)); + } + + @Override + public Optional getSslAlgorithm() { + return Optional.ofNullable(getProperty(KEY_SSL_ALGORITHM)); + } + + @Override + public Optional getSslProtocol() { + return Optional.ofNullable(getProperty(KEY_SSL_PROTOCOL)); + } + + @Override + public Optional getVerifyHost() { + return Optional.ofNullable(getProperty(KEY_VERIFY_HOST)).map(Boolean::valueOf); + } + + @Override + public Optional getChunkSize() { + return Optional.ofNullable(getProperty(KEY_CHUNK_SIZE)).map(Integer::valueOf); + } + + @Override + public Optional getPipelining() { + return Optional.ofNullable(getProperty(KEY_PIPELINING)).map(Boolean::valueOf); + } + + @Override + public Optional getMaxConnections() { + return Optional.ofNullable(getProperty(KEY_MAX_CONNECTIONS)).map(Integer::valueOf); + } + + @Override + public Optional getConnectionTtl() { + return Optional.ofNullable(getProperty(KEY_CONNECTION_TTL)).map(Long::valueOf); + } + + @Override + public Optional getKeepAliveInterval() { + return Optional.ofNullable(getProperty(KEY_KEEP_ALIVE_INTERVAL)).map(Integer::valueOf); + } + + @Override + public Optional getAcquireHostList() { + return Optional.ofNullable(getProperty(KEY_ACQUIRE_HOST_LIST)).map(Boolean::valueOf); + } + + @Override + public Optional getAcquireHostListInterval() { + return Optional.ofNullable(getProperty(KEY_ACQUIRE_HOST_LIST_INTERVAL)).map(Integer::valueOf); + } + + @Override + public 
Optional getLoadBalancingStrategy() { + return Optional.ofNullable(getProperty(KEY_LOAD_BALANCING_STRATEGY)).map(LoadBalancingStrategy::valueOf); + } + + @Override + public Optional getResponseQueueTimeSamples() { + return Optional.ofNullable(getProperty(KEY_RESPONSE_QUEUE_TIME_SAMPLES)).map(Integer::valueOf); + } + + @Override + public Optional getCompression() { + return Optional.ofNullable(getProperty(KEY_COMPRESSION)).map(Compression::valueOf); + } + + @Override + public Optional getCompressionThreshold() { + return Optional.ofNullable(getProperty(KEY_COMPRESSION_THRESHOLD)).map(Integer::valueOf); + } + + @Override + public Optional getCompressionLevel() { + return Optional.ofNullable(getProperty(KEY_COMPRESSION_LEVEL)).map(Integer::valueOf); + } + + @Override + public Optional getSerdeProviderClass() { + return Optional.ofNullable(getProperty(KEY_SERDE_PROVIDER_CLASS)); + } + + @Override + public String toString() { + return "ArangoConfigPropertiesImpl{" + + "prefix='" + prefix + '\'' + + ", properties=" + properties + + '}'; + } +} diff --git a/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorAsyncImpl.java b/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorAsyncImpl.java new file mode 100644 index 000000000..5e94adc43 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorAsyncImpl.java @@ -0,0 +1,72 @@ +package com.arangodb.internal.cursor; + +import com.arangodb.ArangoCursorAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.entity.CursorEntity; +import com.arangodb.internal.ArangoDatabaseAsyncImpl; +import com.arangodb.internal.InternalArangoCursor; +import com.arangodb.internal.net.HostHandle; + +import java.util.NoSuchElementException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; + +import static com.arangodb.internal.ArangoErrors.matches; + +public class ArangoCursorAsyncImpl extends InternalArangoCursor implements ArangoCursorAsync { + + 
private final ArangoDatabaseAsyncImpl db; + private final HostHandle hostHandle; + private final CursorEntity entity; + + public ArangoCursorAsyncImpl( + final ArangoDatabaseAsyncImpl db, + final CursorEntity entity, + final Class type, + final HostHandle hostHandle, + final Boolean allowRetry + ) { + super(db, db.name(), entity, type, allowRetry); + this.db = db; + this.hostHandle = hostHandle; + this.entity = entity; + } + + @Override + public CompletableFuture> nextBatch() { + if (Boolean.TRUE.equals(hasMore())) { + return executorAsync().execute(this::queryNextRequest, db.cursorEntityDeserializer(getType()), hostHandle) + .thenApply(r -> { + // needed because the latest batch does not return the cursor id + r.setId(entity.getId()); + return new ArangoCursorAsyncImpl<>(db, r, getType(), hostHandle, allowRetry()); + }); + } else { + CompletableFuture> cf = new CompletableFuture<>(); + cf.completeExceptionally(new NoSuchElementException()); + return cf; + } + } + + @Override + public CompletableFuture close() { + if (getId() != null && (allowRetry() || Boolean.TRUE.equals(hasMore()))) { + return executorAsync().execute(this::queryCloseRequest, Void.class, hostHandle) + .exceptionally(err -> { + Throwable e = err instanceof CompletionException ? 
err.getCause() : err; + if (e instanceof ArangoDBException) { + ArangoDBException aEx = (ArangoDBException) e; + // ignore errors Response: 404, Error: 1600 - cursor not found + if (matches(aEx, 404, 1600)) { + return null; + } + } + throw ArangoDBException.of(e); + }) + .thenApply(__ -> null); + } else { + return CompletableFuture.completedFuture(null); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorImpl.java b/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorImpl.java new file mode 100644 index 000000000..0c279f15b --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/cursor/ArangoCursorImpl.java @@ -0,0 +1,159 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.cursor; + +import com.arangodb.ArangoCursor; +import com.arangodb.ArangoIterator; +import com.arangodb.entity.CursorEntity; +import com.arangodb.entity.CursorStats; +import com.arangodb.entity.CursorWarning; +import com.arangodb.internal.ArangoCursorExecute; + +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * @author Mark Vollmary + */ +public class ArangoCursorImpl implements ArangoCursor { + + protected final ArangoCursorIterator iterator; + private final Class type; + private final String id; + private final ArangoCursorExecute execute; + private final boolean pontentialDirtyRead; + private final boolean allowRetry; + + public ArangoCursorImpl(final ArangoCursorExecute execute, + final Class type, final CursorEntity result, final Boolean allowRetry) { + super(); + this.execute = execute; + this.type = type; + id = result.getId(); + pontentialDirtyRead = result.isPotentialDirtyRead(); + iterator = new ArangoCursorIterator<>(id, execute, result); + this.allowRetry = Boolean.TRUE.equals(allowRetry); + } + + @Override + public void close() { + if (getId() != null && (allowRetry || iterator.result.getHasMore())) { + getExecute().close(getId()); + } + } + + @Override + public T next() { + return iterator.next(); + } + + @Override + public String getId() { + return id; + } + + @Override + public Class getType() { + return type; + } + + @Override + public Integer getCount() { + return iterator.result.getCount(); + } + + @Override + public CursorStats getStats() { + final CursorEntity.Extras extra = iterator.result.getExtra(); + return extra != null ? extra.getStats() : null; + } + + @Override + public Collection getWarnings() { + final CursorEntity.Extras extra = iterator.result.getExtra(); + return extra != null ? 
extra.getWarnings() : null; + } + + @Override + public boolean isCached() { + final Boolean cached = iterator.result.getCached(); + return Boolean.TRUE.equals(cached); + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public boolean isPotentialDirtyRead() { + return pontentialDirtyRead; + } + + @Override + public ArangoIterator iterator() { + return iterator; + } + + @Override + public String getNextBatchId() { + return iterator.result.getNextBatchId(); + } + + protected ArangoCursorExecute getExecute() { + return execute; + } + + protected static class ArangoCursorIterator implements ArangoIterator { + private final String cursorId; + private final ArangoCursorExecute execute; + private CursorEntity result; + private Iterator arrayIterator; + + protected ArangoCursorIterator(final String cursorId, final ArangoCursorExecute execute, + final CursorEntity result) { + this.cursorId = cursorId; + this.execute = execute; + this.result = result; + arrayIterator = result.getResult().iterator(); + } + + @Override + public boolean hasNext() { + return arrayIterator.hasNext() || result.getHasMore(); + } + + @Override + public T next() { + if (!arrayIterator.hasNext() && Boolean.TRUE.equals(result.getHasMore())) { + result = execute.next(cursorId, result.getNextBatchId()); + arrayIterator = result.getResult().iterator(); + } + if (!hasNext()) { + throw new NoSuchElementException(); + } + return arrayIterator.next(); + } + } + +} + diff --git a/core/src/main/java/com/arangodb/internal/net/AccessType.java b/core/src/main/java/com/arangodb/internal/net/AccessType.java new file mode 100644 index 000000000..c228074c5 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/AccessType.java @@ -0,0 +1,30 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +/** + * @author Mark Vollmary + */ +public enum AccessType { + + WRITE, READ, DIRTY_READ + +} diff --git a/src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java b/core/src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java similarity index 72% rename from src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java rename to core/src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java index a8461572c..bf8e0c9cd 100644 --- a/src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java +++ b/core/src/main/java/com/arangodb/internal/net/ArangoDBRedirectException.java @@ -1,43 +1,42 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import com.arangodb.ArangoDBException; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDBRedirectException extends ArangoDBException { - - private static final long serialVersionUID = -94810262465567613L; - private final String location; - - public ArangoDBRedirectException(final String message, final String location) { - super(message); - this.location = location; - } - - public String getLocation() { - return location; - } - -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; + +/** + * @author Mark Vollmary + */ +public class ArangoDBRedirectException extends ArangoDBException { + + private static final long serialVersionUID = -94810262465567613L; + private final String location; + + public ArangoDBRedirectException(final String message, final String location) { + super(message); + this.location = location; + } + + public String getLocation() { + return location; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/ArangoDBUnavailableException.java b/core/src/main/java/com/arangodb/internal/net/ArangoDBUnavailableException.java new file mode 100644 index 000000000..26f8fde44 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/ArangoDBUnavailableException.java @@ -0,0 +1,39 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.entity.ErrorEntity; + +public class ArangoDBUnavailableException extends ArangoDBException { + + public static ArangoDBUnavailableException from(final ErrorEntity errorEntity) { + if (errorEntity == null || errorEntity.getCode() != 503 || errorEntity.getErrorNum() != 503) { + throw new IllegalArgumentException(); + } + return new ArangoDBUnavailableException(errorEntity); + } + + private ArangoDBUnavailableException(final ErrorEntity errorEntity) { + super(errorEntity); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/Communication.java b/core/src/main/java/com/arangodb/internal/net/Communication.java new file mode 100644 index 000000000..26251e33d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/Communication.java @@ -0,0 +1,165 @@ +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.RequestType; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.util.HostUtils; +import com.arangodb.internal.util.RequestUtils; +import com.arangodb.internal.util.ResponseUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.net.ConnectException; +import java.net.SocketTimeoutException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; + +@UsedInApi +public abstract class Communication implements Closeable { + private static final Logger LOGGER = 
LoggerFactory.getLogger(Communication.class); + protected final HostHandler hostHandler; + protected final InternalSerde serde; + private final AtomicLong reqCount; + + + protected Communication(final ArangoConfig config, final HostHandler hostHandler) { + this.hostHandler = hostHandler; + serde = config.getInternalSerde(); + reqCount = new AtomicLong(); + } + + protected abstract void connect(final Connection conn) throws IOException; + + @Override + public void close() throws IOException { + hostHandler.close(); + } + + public CompletableFuture executeAsync(final InternalRequest request, final HostHandle hostHandle) { + return executeAsync(request, hostHandle, hostHandler.get(hostHandle, RequestUtils.determineAccessType(request)), 0); + } + + private CompletableFuture executeAsync(final InternalRequest request, final HostHandle hostHandle, final Host host, final int attemptCount) { + long reqId = reqCount.getAndIncrement(); + return host.connection().thenCompose(c -> + doExecuteAsync(request, hostHandle, host, attemptCount, c, reqId) + .whenComplete((r, t) -> c.release())); + } + + private CompletableFuture doExecuteAsync( + final InternalRequest request, final HostHandle hostHandle, final Host host, final int attemptCount, Connection connection, long reqId + ) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Send Request [id={}]: {} {}", reqId, request, serde.toJsonString(request.getBody())); + } + final CompletableFuture rfuture = new CompletableFuture<>(); + try { + connect(connection); + } catch (IOException e) { + handleException(true, e, hostHandle, request, host, reqId, attemptCount, rfuture); + return rfuture; + } + + connection.executeAsync(request) + .whenComplete((response, e) -> { + try { + if (e instanceof SocketTimeoutException) { + // SocketTimeoutException exceptions are wrapped and rethrown. 
+ TimeoutException te = new TimeoutException(e.getMessage()); + te.initCause(e); + rfuture.completeExceptionally(ArangoDBException.of(te, reqId)); + } else if (e instanceof TimeoutException) { + rfuture.completeExceptionally(ArangoDBException.of(e, reqId)); + } else if (e instanceof ConnectException) { + handleException(true, e, hostHandle, request, host, reqId, attemptCount, rfuture); + } else if (e != null) { + handleException(isSafe(request), e, hostHandle, request, host, reqId, attemptCount, rfuture); + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Received Response [id={}]: {} {}", reqId, response, serde.toJsonString(response.getBody())); + } + ArangoDBException errorEntityEx = ResponseUtils.translateError(serde, response); + if (errorEntityEx instanceof ArangoDBRedirectException) { + if (attemptCount >= 3) { + rfuture.completeExceptionally(errorEntityEx); + } else { + final String location = ((ArangoDBRedirectException) errorEntityEx).getLocation(); + final HostDescription redirectHost = HostUtils.createFromLocation(location); + hostHandler.failIfNotMatch(redirectHost, errorEntityEx); + mirror( + executeAsync(request, new HostHandle().setHost(redirectHost), hostHandler.get(hostHandle, RequestUtils.determineAccessType(request)), attemptCount + 1), + rfuture + ); + } + } else if (errorEntityEx instanceof ArangoDBUnavailableException) { + handleException(true, errorEntityEx, hostHandle, request, host, reqId, attemptCount, rfuture); + } else if (errorEntityEx != null) { + rfuture.completeExceptionally(errorEntityEx); + } else { + hostHandler.success(); + rfuture.complete(response); + } + } + } catch (Exception ex) { + rfuture.completeExceptionally(ArangoDBException.of(ex, reqId)); + } + }); + return rfuture; + } + + private void handleException(boolean isSafe, Throwable e, HostHandle hostHandle, InternalRequest request, Host host, + long reqId, int attemptCount, CompletableFuture rfuture) { + IOException ioEx = wrapIOEx(e); + hostHandler.fail(ioEx); + 
if (hostHandle != null && hostHandle.getHost() != null) { + hostHandle.setHost(null); + } + hostHandler.checkNext(hostHandle, RequestUtils.determineAccessType(request)); + if (isSafe) { + Host nextHost = hostHandler.get(hostHandle, RequestUtils.determineAccessType(request)); + LOGGER.warn("Could not connect to {} while executing request [id={}]", + host.getDescription(), reqId, ioEx); + LOGGER.debug("Try connecting to {}", nextHost.getDescription()); + mirror( + executeAsync(request, hostHandle, nextHost, attemptCount), + rfuture + ); + } else { + ArangoDBException aEx = ArangoDBException.of(ioEx, reqId); + rfuture.completeExceptionally(aEx); + } + } + + private void mirror(CompletableFuture up, CompletableFuture down) { + up.whenComplete((v, err) -> { + if (err != null) { + down.completeExceptionally(err instanceof CompletionException ? err.getCause() : err); + } else { + down.complete(v); + } + }); + } + + private static IOException wrapIOEx(Throwable t) { + if (t instanceof IOException) { + return (IOException) t; + } else { + return new IOException(t); + } + } + + private boolean isSafe(final InternalRequest request) { + RequestType type = request.getRequestType(); + return type == RequestType.GET || type == RequestType.HEAD || type == RequestType.OPTIONS; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/CommunicationProtocol.java b/core/src/main/java/com/arangodb/internal/net/CommunicationProtocol.java new file mode 100644 index 000000000..62f0f4000 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/CommunicationProtocol.java @@ -0,0 +1,53 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.arch.UsedInApi; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; + +import java.io.Closeable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface CommunicationProtocol extends Closeable { + + default InternalResponse execute(final InternalRequest request, final HostHandle hostHandle) { + try { + return executeAsync(request, hostHandle).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw ArangoDBException.of(e); + } catch (ExecutionException e) { + throw ArangoDBException.of(e.getCause()); + } + } + + CompletableFuture executeAsync(final InternalRequest request, final HostHandle hostHandle); + + void setJwt(String jwt); + +} diff --git a/src/main/java/com/arangodb/internal/net/Connection.java b/core/src/main/java/com/arangodb/internal/net/Connection.java similarity index 72% rename from src/main/java/com/arangodb/internal/net/Connection.java rename to core/src/main/java/com/arangodb/internal/net/Connection.java index f790467f1..461c5ccea 100644 --- a/src/main/java/com/arangodb/internal/net/Connection.java +++ b/core/src/main/java/com/arangodb/internal/net/Connection.java @@ -1,38 +1,40 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the 
Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.io.Closeable; -import java.io.IOException; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public interface Connection extends Closeable { - - Host getHost(); - - void closeOnError() throws IOException; - -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; + +import java.io.Closeable; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface Connection extends Closeable { + void setJwt(String jwt); + + CompletableFuture executeAsync(InternalRequest request); + + void release(); +} diff --git a/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java b/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java new file mode 100644 index 000000000..0e01ca824 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java @@ -0,0 +1,33 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.config.ArangoConfig; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface ConnectionFactory { + Connection create(ArangoConfig config, HostDescription host, ConnectionPool pool); +} diff --git a/src/main/java/com/arangodb/internal/net/CommunicationProtocol.java b/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java similarity index 71% rename from src/main/java/com/arangodb/internal/net/CommunicationProtocol.java rename to core/src/main/java/com/arangodb/internal/net/ConnectionPool.java index 10540b932..0db87c0c3 100644 --- a/src/main/java/com/arangodb/internal/net/CommunicationProtocol.java +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java @@ -1,37 +1,42 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.io.Closeable; - -import com.arangodb.ArangoDBException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public interface CommunicationProtocol extends Closeable { - - Response execute(final Request request, HostHandle hostHandle) throws ArangoDBException; - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; + +import java.io.Closeable; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface ConnectionPool extends Closeable { + + Connection createConnection(); + + CompletableFuture connection(); + + void release(final Connection connection); + + void setJwt(String jwt); + +} diff --git a/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java b/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java new file mode 100644 index 000000000..9f22ee50a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java @@ -0,0 +1,119 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.util.AsyncQueue; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; + +public class ConnectionPoolImpl implements ConnectionPool { + + public static final int HTTP1_SLOTS = 1; // HTTP/1: max 1 pending request + public static final int HTTP1_SLOTS_PIPELINING = 10; // HTTP/1: max pipelining + public static final int HTTP2_SLOTS = 32; // HTTP/2: max streams, hard-coded see BTS-2049 + + private final AsyncQueue slots = new AsyncQueue<>(); + private final HostDescription host; + private final ArangoConfig config; + private final int maxConnections; + private final List connections; + private final ConnectionFactory factory; + private final int maxSlots; + private volatile String jwt = null; + private volatile boolean closed = false; + + public ConnectionPoolImpl(final HostDescription host, final ArangoConfig config, final ConnectionFactory factory) { + super(); + this.host = host; + this.config = config; + this.maxConnections = config.getMaxConnections(); + this.factory = factory; + connections = new CopyOnWriteArrayList<>(); + switch (config.getProtocol()) { + case HTTP_JSON: + case HTTP_VPACK: + maxSlots = config.getPipelining() ? 
HTTP1_SLOTS_PIPELINING : HTTP1_SLOTS; + break; + default: + maxSlots = HTTP2_SLOTS; + } + } + + @Override + public Connection createConnection() { + Connection c = factory.create(config, host, this); + c.setJwt(jwt); + return c; + } + + @Override + public CompletableFuture connection() { + if (closed) { + throw new ArangoDBException("Connection pool already closed!"); + } + + if (connections.size() < maxConnections) { + Connection connection = createConnection(); + connections.add(connection); + for (int i = 0; i < maxSlots; i++) { + slots.offer((connection)); + } + } + + return slots.poll(); + } + + @Override + public void release(Connection connection) { + slots.offer(connection); + } + + @Override + public void setJwt(String jwt) { + if (jwt != null) { + this.jwt = jwt; + for (Connection connection : connections) { + connection.setJwt(jwt); + } + } + } + + @Override + public void close() throws IOException { + closed = true; + for (final Connection connection : connections) { + connection.close(); + } + } + + @Override + public String toString() { + return "ConnectionPoolImpl [host=" + host + ", maxConnections=" + maxConnections + ", connections=" + + connections.size() + ", factory=" + factory.getClass().getSimpleName() + "]"; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/DirtyReadHostHandler.java b/core/src/main/java/com/arangodb/internal/net/DirtyReadHostHandler.java new file mode 100644 index 000000000..b54354dcb --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/DirtyReadHostHandler.java @@ -0,0 +1,93 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.config.HostDescription; + +import java.io.IOException; + +/** + * @author Mark Vollmary + */ +public class DirtyReadHostHandler implements HostHandler { + + private final HostHandler master; + private final HostHandler follower; + private AccessType currentAccessType; + + public DirtyReadHostHandler(final HostHandler master, final HostHandler follower) { + super(); + this.master = master; + this.follower = follower; + } + + private HostHandler determineHostHandler() { + if (currentAccessType == AccessType.DIRTY_READ) { + return follower; + } + return master; + } + + @Override + public Host get(final HostHandle hostHandle, final AccessType accessType) { + this.currentAccessType = accessType; + return determineHostHandler().get(hostHandle, accessType); + } + + @Override + public void checkNext(HostHandle hostHandle, AccessType accessType) { + this.currentAccessType = accessType; + determineHostHandler().checkNext(hostHandle, accessType); + } + + @Override + public void success() { + determineHostHandler().success(); + } + + @Override + public void fail(Exception exception) { + determineHostHandler().fail(exception); + } + + @Override + public void failIfNotMatch(HostDescription host, Exception exception) { + determineHostHandler().failIfNotMatch(host, exception); + } + + @Override + public void reset() { + determineHostHandler().reset(); + } + + @Override + public void close() throws IOException { + master.close(); + 
follower.close(); + } + + @Override + public void setJwt(String jwt) { + master.setJwt(jwt); + follower.setJwt(jwt); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java b/core/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java new file mode 100644 index 000000000..7ba5b58de --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java @@ -0,0 +1,159 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.ArangoExecutorSync; +import com.arangodb.internal.ArangoRequestParam; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.util.HostUtils; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.RequestType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.*; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; + +import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; + +/** + * @author Mark Vollmary + */ +public class ExtendedHostResolver implements HostResolver { + + private static final Logger LOGGER = LoggerFactory.getLogger(ExtendedHostResolver.class); + + private final HostSet hosts; + + private final ArangoConfig config; + private final ConnectionFactory connectionFactory; + private final Integer acquireHostListInterval; + private final ScheduledExecutorService scheduler; + private ArangoExecutorSync executor; + private InternalSerde arangoSerialization; + private ScheduledFuture schedule; + + public ExtendedHostResolver(final List hosts, final ArangoConfig config, + final ConnectionFactory connectionFactory, Integer acquireHostListInterval) { + + this.acquireHostListInterval = acquireHostListInterval; + this.hosts = new HostSet(hosts); + this.config = config; + this.connectionFactory = connectionFactory; + + scheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + return t; + } + ); + } + + @Override + public void init(ArangoExecutorSync executor, InternalSerde 
arangoSerialization) { + this.executor = executor; + this.arangoSerialization = arangoSerialization; + resolve(); + schedule = scheduler.scheduleAtFixedRate(this::resolve, acquireHostListInterval, acquireHostListInterval, TimeUnit.MILLISECONDS); + } + + @Override + public void close() { + schedule.cancel(false); + scheduler.shutdown(); + } + + @Override + public HostSet getHosts() { + return hosts; + } + + private void resolve() { + final Collection endpoints = resolveFromServer(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Resolve {} Endpoints", endpoints.size()); + LOGGER.debug("Endpoints {}", Arrays.deepToString(endpoints.toArray())); + } + + if (!endpoints.isEmpty()) { + hosts.markAllForDeletion(); + } + + for (final String endpoint : endpoints) { + LOGGER.debug("Create HOST from {}", endpoint); + + if (endpoint.matches(".*://.+:[0-9]+")) { + + final String[] s = endpoint.replaceAll(".*://", "").split(":"); + if (s.length == 2) { + final HostDescription description = new HostDescription(s[0], Integer.parseInt(s[1])); + hosts.addHost(HostUtils.createHost(description, config, connectionFactory)); + } else if (s.length == 4) { + // IPV6 Address - TODO: we need a proper function to resolve AND support IPV4 & IPV6 functions + // globally + final HostDescription description = new HostDescription("127.0.0.1", Integer.parseInt(s[3])); + hosts.addHost(HostUtils.createHost(description, config, connectionFactory)); + } else { + LOGGER.warn("Skip Endpoint (Missing Port) {}", endpoint); + } + + } else { + LOGGER.warn("Skip Endpoint (Format) {}", endpoint); + } + } + hosts.clearAllMarkedForDeletion(); + } + + private Collection resolveFromServer() { + Collection response; + try { + response = executor.execute( + new InternalRequest(ArangoRequestParam.SYSTEM, RequestType.GET, "/_api/cluster/endpoints"), + (r) -> { + final List> tmp = arangoSerialization.deserialize(r.getBody(), + "/endpoints", + constructParametricType(List.class, + constructParametricType(Map.class, 
String.class, String.class))); + Collection endpoints = new ArrayList<>(); + for (final Map map : tmp) { + endpoints.add(map.get("endpoint")); + } + return endpoints; + }, null); + } catch (final ArangoDBException e) { + final Integer responseCode = e.getResponseCode(); + // responseCode == 403: single server < 3.7 + // responseCode == 501: single server >= 3.7 + if (responseCode != null && (responseCode == 403 || responseCode == 501)) { + response = Collections.emptyList(); + } else { + throw e; + } + } + return response; + } +} diff --git a/core/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java b/core/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java new file mode 100644 index 000000000..f1d9b3b75 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java @@ -0,0 +1,113 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDBMultipleException; +import com.arangodb.config.HostDescription; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * @author Mark Vollmary + */ +public class FallbackHostHandler implements HostHandler { + + private final HostResolver resolver; + private final List lastFailExceptions; + private Host current; + private Host lastSuccess; + private int iterations; + private HostSet hosts; + + public FallbackHostHandler(final HostResolver resolver) { + this.resolver = resolver; + lastFailExceptions = new CopyOnWriteArrayList<>(); + reset(); + hosts = resolver.getHosts(); + current = lastSuccess = hosts.getHostsList().get(0); + } + + @Override + public Host get(final HostHandle hostHandle, AccessType accessType) { + checkNext(hostHandle, accessType); + if (current.isMarkforDeletion()) { + fail(new ArangoDBException("Host marked for deletion")); + } + return current; + } + + @Override + public void checkNext(HostHandle hostHandle, AccessType accessType) { + if (current == lastSuccess && iterations >= 3) { + ArangoDBException e = ArangoDBException.of("Cannot contact any host!", + new ArangoDBMultipleException(new ArrayList<>(lastFailExceptions))); + reset(); + throw e; + } + } + + @Override + public void success() { + lastSuccess = current; + reset(); + } + + @Override + public void fail(Exception exception) { + hosts = resolver.getHosts(); + final List hostList = hosts.getHostsList(); + final int index = hostList.indexOf(current) + 1; + final boolean inBound = index < hostList.size(); + current = hostList.get(inBound ? 
index : 0); + if (!inBound) { + iterations++; + } + lastFailExceptions.add(exception); + } + + @Override + public synchronized void failIfNotMatch(HostDescription host, Exception exception) { + if (!host.equals(current.getDescription())) { + fail(exception); + } + } + + @Override + public void reset() { + iterations = 0; + lastFailExceptions.clear(); + } + + @Override + public void close() { + hosts.close(); + resolver.close(); + } + + @Override + public void setJwt(String jwt) { + hosts.setJwt(jwt); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/Host.java b/core/src/main/java/com/arangodb/internal/net/Host.java new file mode 100644 index 000000000..b2afdd8e1 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/Host.java @@ -0,0 +1,46 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.HostDescription; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface Host { + + HostDescription getDescription(); + + CompletableFuture connection(); + + void close() throws IOException; + + boolean isMarkforDeletion(); + + void setMarkforDeletion(boolean markforDeletion); + + void setJwt(String jwt); +} diff --git a/core/src/main/java/com/arangodb/internal/net/HostHandle.java b/core/src/main/java/com/arangodb/internal/net/HostHandle.java new file mode 100644 index 000000000..c7de4f38a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/HostHandle.java @@ -0,0 +1,47 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.HostDescription; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public class HostHandle { + + private HostDescription host; + + public HostHandle() { + super(); + } + + public HostDescription getHost() { + return host; + } + + public HostHandle setHost(final HostDescription host) { + this.host = host; + return this; + } + +} diff --git a/src/main/java/com/arangodb/entity/DocumentEntity.java b/core/src/main/java/com/arangodb/internal/net/HostHandler.java similarity index 58% rename from src/main/java/com/arangodb/entity/DocumentEntity.java rename to core/src/main/java/com/arangodb/internal/net/HostHandler.java index 7720dae9c..d9ec2ddb7 100644 --- a/src/main/java/com/arangodb/entity/DocumentEntity.java +++ b/core/src/main/java/com/arangodb/internal/net/HostHandler.java @@ -1,54 +1,50 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.entity.DocumentField.Type; - -/** - * @author Mark Vollmary - * - */ -public class DocumentEntity { - - @DocumentField(Type.KEY) - private String key; - @DocumentField(Type.ID) - private String id; - @DocumentField(Type.REV) - private String rev; - - public DocumentEntity() { - super(); - } - - public String getKey() { - return key; - } - - public String getId() { - return id; - } - - public String getRev() { - return rev; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.HostDescription; + +import java.io.IOException; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface HostHandler { + + Host get(HostHandle hostHandle, AccessType accessType); + + void checkNext(HostHandle hostHandle, AccessType accessType); + + void success(); + + void fail(Exception exception); + + void failIfNotMatch(HostDescription host, Exception exception); + + void reset(); + + void close() throws IOException; + + void setJwt(String jwt); + +} diff --git a/core/src/main/java/com/arangodb/internal/net/HostImpl.java b/core/src/main/java/com/arangodb/internal/net/HostImpl.java new file mode 100644 index 000000000..0277f8246 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/HostImpl.java @@ -0,0 +1,100 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.config.HostDescription; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + */ +public class HostImpl implements Host { + + private final ConnectionPool connectionPool; + private final HostDescription description; + private boolean markforDeletion = false; + + public HostImpl(final ConnectionPool connectionPool, final HostDescription description) { + super(); + this.connectionPool = connectionPool; + this.description = description; + } + + @Override + public void close() throws IOException { + connectionPool.close(); + } + + @Override + public HostDescription getDescription() { + return description; + } + + @Override + public CompletableFuture connection() { + return connectionPool.connection(); + } + + @Override + public String toString() { + return "HostImpl [connectionPool=" + connectionPool + ", description=" + description + ", markforDeletion=" + + markforDeletion + "]"; + } + + public boolean isMarkforDeletion() { + return markforDeletion; + } + + public void setMarkforDeletion(boolean markforDeletion) { + this.markforDeletion = markforDeletion; + } + + @Override + public void setJwt(String jwt) { + connectionPool.setJwt(jwt); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((description == null) ? 
0 : description.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + HostImpl other = (HostImpl) obj; + if (description == null) { + return other.description == null; + } else return description.equals(other.description); + } + + +} diff --git a/src/main/java/com/arangodb/internal/net/HostResolver.java b/core/src/main/java/com/arangodb/internal/net/HostResolver.java similarity index 66% rename from src/main/java/com/arangodb/internal/net/HostResolver.java rename to core/src/main/java/com/arangodb/internal/net/HostResolver.java index c71bbeee9..8ce7ac8c7 100644 --- a/src/main/java/com/arangodb/internal/net/HostResolver.java +++ b/core/src/main/java/com/arangodb/internal/net/HostResolver.java @@ -1,43 +1,41 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.Collection; -import java.util.List; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public interface HostResolver { - - public interface EndpointResolver { - Collection resolve(boolean closeConnections) throws ArangoDBException; - } - - void init(final EndpointResolver resolver); - - List resolve(boolean initial, boolean closeConnections); - -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.internal.ArangoExecutorSync; +import com.arangodb.internal.serde.InternalSerde; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public interface HostResolver { + + default void init(ArangoExecutorSync executorSync, InternalSerde arangoSerialization) { + } + + default void close() { + } + + HostSet getHosts(); + +} diff --git a/core/src/main/java/com/arangodb/internal/net/HostSet.java b/core/src/main/java/com/arangodb/internal/net/HostSet.java new file mode 100644 index 000000000..d47612efc --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/HostSet.java @@ -0,0 +1,104 @@ +package com.arangodb.internal.net; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +public class HostSet { + private static final Logger LOGGER = LoggerFactory.getLogger(HostSet.class); + + private final ArrayList hosts = new ArrayList<>(); + private volatile String jwt = null; + + public HostSet(List hosts) { + super(); + + for (Host host : hosts) { + addHost(host); + } + + } + + public List getHostsList() { + return Collections.unmodifiableList(hosts); + } + + public void addHost(Host newHost) { + if (hosts.contains(newHost)) { + LOGGER.debug("Host {} already in Set", newHost); + for (Host host : hosts) { + if (host.equals(newHost)) { + host.setMarkforDeletion(false); + } + } + } else { + newHost.setJwt(jwt); + hosts.add(newHost); + LOGGER.debug("Added Host {} - now {} Hosts in List", newHost, hosts.size()); + } + } + + public void close() { + LOGGER.debug("Close all Hosts in Set"); + + for (Host host : hosts) { + try { + + LOGGER.debug("Try to close Host {}", host); + host.close(); + + } catch (IOException e) { + LOGGER.warn("Error during closing the Host " + 
host, e); + } + } + } + + public void markAllForDeletion() { + + for (Host host : hosts) { + host.setMarkforDeletion(true); + } + + } + + public void clearAllMarkedForDeletion() { + + LOGGER.debug("Clear all Hosts in Set with markForDeletion"); + + Iterator iterable = hosts.iterator(); + while (iterable.hasNext()) { + Host host = iterable.next(); + if (host.isMarkforDeletion()) { + try { + LOGGER.debug("Try to close Host {}", host); + host.close(); + } catch (IOException e) { + LOGGER.warn("Error during closing the Host " + host, e); + } finally { + iterable.remove(); + } + } + } + + } + + public void clear() { + LOGGER.debug("Clear all Hosts in Set"); + + close(); + hosts.clear(); + } + + public void setJwt(String jwt) { + this.jwt = jwt; + for (Host h : hosts) { + h.setJwt(jwt); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/ProtocolProvider.java b/core/src/main/java/com/arangodb/internal/net/ProtocolProvider.java new file mode 100644 index 000000000..9420a1cb5 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/ProtocolProvider.java @@ -0,0 +1,30 @@ +package com.arangodb.internal.net; + + +import com.arangodb.Protocol; +import com.arangodb.arch.UsedInApi; +import com.arangodb.config.ProtocolConfig; +import com.arangodb.internal.config.ArangoConfig; +import com.fasterxml.jackson.databind.Module; + +@UsedInApi +public interface ProtocolProvider { + + boolean supportsProtocol(Protocol protocol); + + /** + * @deprecated use {@link #createConnectionFactory(ProtocolConfig)} instead + */ + @Deprecated + default ConnectionFactory createConnectionFactory() { + throw new UnsupportedOperationException(); + } + + default ConnectionFactory createConnectionFactory(ProtocolConfig config) { + return createConnectionFactory(); + } + + CommunicationProtocol createProtocol(ArangoConfig config, HostHandler hostHandler); + + Module protocolModule(); +} diff --git a/core/src/main/java/com/arangodb/internal/net/RandomHostHandler.java 
b/core/src/main/java/com/arangodb/internal/net/RandomHostHandler.java new file mode 100644 index 000000000..b483765c4 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/RandomHostHandler.java @@ -0,0 +1,100 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.config.HostDescription; + +import java.util.ArrayList; +import java.util.Collections; + +/** + * @author Mark Vollmary + */ +public class RandomHostHandler implements HostHandler { + + private final HostResolver resolver; + private final HostHandler fallback; + private Host current; + private HostSet hosts; + + public RandomHostHandler(final HostResolver resolver, final HostHandler fallback) { + super(); + this.resolver = resolver; + this.fallback = fallback; + hosts = resolver.getHosts(); + current = getRandomHost(); + } + + @Override + public Host get(final HostHandle hostHandle, AccessType accessType) { + if (current == null || current.isMarkforDeletion()) { + hosts = resolver.getHosts(); + current = getRandomHost(); + } + return current; + } + + @Override + public void checkNext(HostHandle hostHandle, AccessType accessType) { + } + + @Override + public void success() { + fallback.success(); + } + + @Override + public void fail(Exception exception) { + fallback.fail(exception); + current = 
fallback.get(null, null); + } + + @Override + public synchronized void failIfNotMatch(HostDescription host, Exception exception) { + if (!host.equals(current.getDescription())) { + fail(exception); + } + } + + private Host getRandomHost() { + final ArrayList hostList = new ArrayList<>(hosts.getHostsList()); + Collections.shuffle(hostList); + return hostList.get(0); + } + + @Override + public void reset() { + fallback.reset(); + } + + @Override + public void close() { + hosts.close(); + resolver.close(); + } + + @Override + public void setJwt(String jwt) { + fallback.setJwt(jwt); + hosts.setJwt(jwt); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java b/core/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java new file mode 100644 index 000000000..ec70f8e13 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java @@ -0,0 +1,124 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDBMultipleException; +import com.arangodb.config.HostDescription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * @author Mark Vollmary + */ +public class RoundRobinHostHandler implements HostHandler { + + private final static Logger LOGGER = LoggerFactory.getLogger(RoundRobinHostHandler.class); + + private final HostResolver resolver; + private final List lastFailExceptions; + private long current; + private int fails; + private HostSet hosts; + + public RoundRobinHostHandler(final HostResolver resolver) { + super(); + this.resolver = resolver; + lastFailExceptions = new CopyOnWriteArrayList<>(); + hosts = resolver.getHosts(); + current = 0L; + reset(); + } + + @Override + public Host get(final HostHandle hostHandle, AccessType accessType) { + checkNext(hostHandle, accessType); + final int size = hosts.getHostsList().size(); + final int index = (int) ((current++) % size); + Host host = hosts.getHostsList().get(index); + if (hostHandle != null) { + final HostDescription hostDescription = hostHandle.getHost(); + if (hostDescription != null) { + for (int i = index; i < index + size; i++) { + host = hosts.getHostsList().get(i % size); + if (hostDescription.equals(host.getDescription())) { + break; + } + } + } else { + hostHandle.setHost(host.getDescription()); + } + } + LOGGER.debug("Returning host: {}", host); + return host; + } + + @Override + public void checkNext(HostHandle hostHandle, AccessType accessType) { + hosts = resolver.getHosts(); + final int size = hosts.getHostsList().size(); + + if (fails > size) { + ArangoDBException e = ArangoDBException.of("Cannot contact any host!", + new ArangoDBMultipleException(new ArrayList<>(lastFailExceptions))); + reset(); + 
throw e; + } + } + + @Override + public void success() { + reset(); + } + + @Override + public void fail(Exception exception) { + fails++; + lastFailExceptions.add(exception); + } + + @Override + public void failIfNotMatch(HostDescription host, Exception exception) { + fail(exception); + } + + @Override + public void reset() { + fails = 0; + lastFailExceptions.clear(); + } + + @Override + public void close() { + hosts.close(); + resolver.close(); + } + + @Override + public void setJwt(String jwt) { + hosts.setJwt(jwt); + } + +} diff --git a/src/main/java/com/arangodb/internal/net/HostHandle.java b/core/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java similarity index 71% rename from src/main/java/com/arangodb/internal/net/HostHandle.java rename to core/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java index 1569a790a..f7d594f48 100644 --- a/src/main/java/com/arangodb/internal/net/HostHandle.java +++ b/core/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java @@ -1,46 +1,42 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class HostHandle { - - private Host host; - - public HostHandle() { - super(); - } - - public Host getHost() { - return host; - } - - public HostHandle setHost(final Host host) { - this.host = host; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.net; + +import java.util.List; + +/** + * @author Mark Vollmary + */ +public class SimpleHostResolver implements HostResolver { + + private final List hosts; + + public SimpleHostResolver(final List hosts) { + super(); + this.hosts = hosts; + } + + @Override + public HostSet getHosts() { + return new HostSet(hosts); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/ContentTypeFactory.java b/core/src/main/java/com/arangodb/internal/serde/ContentTypeFactory.java new file mode 100644 index 000000000..453cc2a0f --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/ContentTypeFactory.java @@ -0,0 +1,24 @@ +package com.arangodb.internal.serde; + +import com.arangodb.ContentType; +import com.arangodb.Protocol; + +public final class ContentTypeFactory { + private ContentTypeFactory() { + } + + public static ContentType of(Protocol protocol) { + switch (protocol) { + case HTTP_JSON: + case HTTP2_JSON: + return ContentType.JSON; + case VST: + case HTTP_VPACK: + case HTTP2_VPACK: + return ContentType.VPACK; + default: + throw new IllegalArgumentException("Unexpected value: " + protocol); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalAnnotationIntrospector.java b/core/src/main/java/com/arangodb/internal/serde/InternalAnnotationIntrospector.java new file mode 100644 index 000000000..d0655cfd5 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalAnnotationIntrospector.java @@ -0,0 +1,66 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.databind.introspect.Annotated; +import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +class InternalAnnotationIntrospector extends JacksonAnnotationIntrospector { + + private final transient UserDataSerializer 
userDataSerializer; + private final transient UserDataDeserializer userDataDeserializer; + + InternalAnnotationIntrospector( + final UserDataSerializer userDataSerializer, + final UserDataDeserializer userDataDeserializer + ) { + this.userDataSerializer = userDataSerializer; + this.userDataDeserializer = userDataDeserializer; + } + + @Override + public Object findSerializer(Annotated a) { + if (a.getAnnotation(UserData.class) != null) { + return userDataSerializer; + } else { + return super.findSerializer(a); + } + } + + @Override + public Object findContentSerializer(Annotated a) { + if (a.getAnnotation(UserDataInside.class) != null) { + return userDataSerializer; + } else { + return super.findContentSerializer(a); + } + } + + @Override + public Object findDeserializer(Annotated a) { + if (a.getAnnotation(UserData.class) != null) { + return userDataDeserializer; + } else { + return super.findDeserializer(a); + } + } + + @Override + public Object findContentDeserializer(Annotated a) { + if (a.getAnnotation(UserDataInside.class) != null) { + return userDataDeserializer; + } else { + return super.findContentDeserializer(a); + } + } + + private void writeObject(ObjectOutputStream out) throws IOException { + throw new IOException("Serialization not allowed"); + } + + private void readObject(ObjectInputStream in) throws IOException { + throw new IOException("Deserialization not allowed"); + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java b/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java new file mode 100644 index 000000000..20b3ce3b7 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java @@ -0,0 +1,158 @@ +package com.arangodb.internal.serde; + +import com.arangodb.entity.CollectionStatus; +import com.arangodb.entity.CollectionType; +import com.arangodb.entity.InvertedIndexPrimarySort; +import com.arangodb.entity.ReplicationFactor; +import 
com.arangodb.entity.arangosearch.CollectionLink; +import com.arangodb.entity.arangosearch.FieldLink; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.arangodb.internal.InternalResponse; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.TreeNode; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.*; + +import java.io.IOException; +import java.io.StringWriter; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; + +public final class InternalDeserializers { + + static final JsonDeserializer RAW_JSON_DESERIALIZER = new JsonDeserializer() { + @Override + public RawJson deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + if (JsonFactory.FORMAT_NAME_JSON.equals(p.getCodec().getFactory().getFormatName())) { + return RawJson.of(new String(SerdeUtils.extractBytes(p), StandardCharsets.UTF_8)); + } else { + StringWriter w = new StringWriter(); + try (JsonGenerator gen = SerdeUtils.INSTANCE.getJsonMapper().getFactory().createGenerator(w)) { + gen.copyCurrentStructure(p); + gen.flush(); + } + return RawJson.of(w.toString()); + } + } + }; + + static final JsonDeserializer RAW_BYTES_DESERIALIZER = new JsonDeserializer() { + @Override + public RawBytes deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + return RawBytes.of(SerdeUtils.extractBytes(p)); + } + }; + + static final JsonDeserializer COLLECTION_STATUS = new JsonDeserializer() { + @Override + public CollectionStatus deserialize(final JsonParser p, final DeserializationContext ctxt) throws IOException { + return 
CollectionStatus.fromStatus(p.getIntValue()); + } + }; + + static final JsonDeserializer COLLECTION_TYPE = new JsonDeserializer() { + @Override + public CollectionType deserialize(final JsonParser p, final DeserializationContext ctxt) throws IOException { + return CollectionType.fromType(p.getIntValue()); + } + }; + + static final JsonDeserializer REPLICATION_FACTOR = new JsonDeserializer() { + @Override + public ReplicationFactor deserialize(final JsonParser p, final DeserializationContext ctxt) throws IOException { + TreeNode node = p.readValueAsTree(); + if (node instanceof NumericNode) { + return ReplicationFactor.of(((NumericNode) node).intValue()); + } else if (node instanceof TextNode && "satellite".equals(((TextNode) node).textValue())) { + return ReplicationFactor.ofSatellite(); + } else throw new IllegalArgumentException(); + } + }; + + @SuppressWarnings("unchecked") + static final JsonDeserializer RESPONSE = new JsonDeserializer() { + @Override + public InternalResponse deserialize(final JsonParser p, final DeserializationContext ctxt) throws IOException { + final InternalResponse response = new InternalResponse(); + Iterator it = ((ArrayNode) p.readValueAsTree()).iterator(); + response.setVersion(it.next().intValue()); + response.setType(it.next().intValue()); + response.setResponseCode(it.next().intValue()); + if (it.hasNext()) { + response.putMetas(readTreeAsValue(p, ctxt, it.next(), Map.class)); + } + return response; + } + }; + + static final JsonDeserializer INVERTED_INDEX_PRIMARY_SORT_FIELD = new JsonDeserializer() { + @Override + public InvertedIndexPrimarySort.Field deserialize(final JsonParser p, final DeserializationContext ctxt) throws IOException { + ObjectNode tree = p.readValueAsTree(); + String field = tree.get("field").textValue(); + InvertedIndexPrimarySort.Field.Direction direction = tree.get("asc").booleanValue() ? 
+ InvertedIndexPrimarySort.Field.Direction.asc : InvertedIndexPrimarySort.Field.Direction.desc; + return new InvertedIndexPrimarySort.Field(field, direction); + } + }; + + private InternalDeserializers() { + } + + private static T readTreeAsValue(JsonParser p, DeserializationContext ctxt, JsonNode n, Class targetType) throws IOException { + try (TreeTraversingParser t = new TreeTraversingParser(n, p.getCodec())) { + t.nextToken(); + return ctxt.readValue(t, targetType); + } + } + + public static class CollectionLinksDeserializer extends JsonDeserializer> { + + @Override + public Collection deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + Collection out = new ArrayList<>(); + ObjectNode tree = p.readValueAsTree(); + Iterator> it = tree.fields(); + while (it.hasNext()) { + Map.Entry e = it.next(); + ObjectNode v = (ObjectNode) e.getValue(); + v.put("name", e.getKey()); + out.add(readTreeAsValue(p, ctxt, v, CollectionLink.class)); + } + return out; + } + } + + public static class FieldLinksDeserializer extends JsonDeserializer { + + @Override + public FieldLink[] deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + Collection out = new ArrayList<>(); + ObjectNode tree = p.readValueAsTree(); + Iterator> it = tree.fields(); + while (it.hasNext()) { + Map.Entry e = it.next(); + ObjectNode v = (ObjectNode) e.getValue(); + v.put("name", e.getKey()); + out.add(readTreeAsValue(p, ctxt, v, FieldLink.class)); + } + return out.toArray(new FieldLink[0]); + } + } + + public static class CollectionSchemaRuleDeserializer extends JsonDeserializer { + @Override + public String deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + return SerdeUtils.INSTANCE.writeJson(p.readValueAsTree()); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalMapperProvider.java b/core/src/main/java/com/arangodb/internal/serde/InternalMapperProvider.java new file mode 100644 index 
000000000..e7b0fbda9 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalMapperProvider.java @@ -0,0 +1,47 @@ +package com.arangodb.internal.serde; + +import com.arangodb.ArangoDBException; +import com.arangodb.ContentType; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +class InternalMapperProvider { + private static final Logger LOG = LoggerFactory.getLogger(InternalMapperProvider.class); + + static ObjectMapper of(final ContentType contentType) { + String formatName; + if (contentType == ContentType.JSON) { + formatName = "JSON"; + } else if (contentType == ContentType.VPACK) { + formatName = "Velocypack"; + } else { + throw new IllegalArgumentException("Unexpected value: " + contentType); + } + + ServiceLoader sl = ServiceLoader.load(JsonFactory.class); + Iterator iterator = sl.iterator(); + while (iterator.hasNext()) { + JsonFactory jf; + try { + jf = iterator.next(); + } catch (ServiceConfigurationError e) { + LOG.warn("ServiceLoader failed to load JsonFactory", e); + continue; + } + if (formatName.equals(jf.getFormatName())) { + if (contentType == ContentType.JSON) { + JacksonUtils.tryConfigureJsonFactory(jf); + } + return new ObjectMapper(jf); + } + LOG.debug("Required format ({}) not supported by JsonFactory: {}", formatName, jf.getClass().getName()); + } + throw new ArangoDBException("No JsonFactory found for content type: " + contentType); + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalModule.java b/core/src/main/java/com/arangodb/internal/serde/InternalModule.java new file mode 100644 index 000000000..392a9c334 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalModule.java @@ -0,0 +1,36 @@ +package com.arangodb.internal.serde; + +import 
com.arangodb.entity.CollectionStatus; +import com.arangodb.entity.CollectionType; +import com.arangodb.entity.InvertedIndexPrimarySort; +import com.arangodb.entity.MultiDocumentEntity; +import com.arangodb.entity.ReplicationFactor; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.module.SimpleModule; + +class InternalModule { + + static Module get(InternalSerde serde) { + SimpleModule module = new SimpleModule(); + + module.addDeserializer(MultiDocumentEntity.class, new MultiDocumentEntityDeserializer(serde)); + + module.addSerializer(RawJson.class, InternalSerializers.RAW_JSON_SERIALIZER); + module.addSerializer(InternalRequest.class, InternalSerializers.REQUEST); + module.addSerializer(CollectionType.class, InternalSerializers.COLLECTION_TYPE); + + module.addDeserializer(RawJson.class, InternalDeserializers.RAW_JSON_DESERIALIZER); + module.addDeserializer(RawBytes.class, InternalDeserializers.RAW_BYTES_DESERIALIZER); + module.addDeserializer(CollectionStatus.class, InternalDeserializers.COLLECTION_STATUS); + module.addDeserializer(CollectionType.class, InternalDeserializers.COLLECTION_TYPE); + module.addDeserializer(ReplicationFactor.class, InternalDeserializers.REPLICATION_FACTOR); + module.addDeserializer(InternalResponse.class, InternalDeserializers.RESPONSE); + module.addDeserializer(InvertedIndexPrimarySort.Field.class, InternalDeserializers.INVERTED_INDEX_PRIMARY_SORT_FIELD); + + return module; + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalParameterizedType.java b/core/src/main/java/com/arangodb/internal/serde/InternalParameterizedType.java new file mode 100644 index 000000000..82c0dcf4c --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalParameterizedType.java @@ -0,0 +1,31 @@ +package 
com.arangodb.internal.serde; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; + +class InternalParameterizedType implements ParameterizedType { + + private final Class rawType; + private final Type[] actualRawArguments; + + InternalParameterizedType(final Class rawType, final Type[] actualRawArguments) { + this.rawType = rawType; + this.actualRawArguments = actualRawArguments; + } + + @Override + public Type getRawType() { + return rawType; + } + + @Override + public Type[] getActualTypeArguments() { + return actualRawArguments; + } + + @Override + public Type getOwnerType() { + return null; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java new file mode 100644 index 000000000..1459e9970 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java @@ -0,0 +1,143 @@ +package com.arangodb.internal.serde; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.ContentType; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonNode; + +import java.lang.reflect.Type; + +@UsedInApi +public interface InternalSerde extends ArangoSerde { + + /** + * Used for logging and debugging. + * + * @param content byte array + * @return JSON string + * @implSpec return {@code "[Unparsable data]"} in case of parsing exception + */ + String toJsonString(byte[] content); + + /** + * Extract the nested content pointed by the json pointer. + * Used for extracting nested user data. + * + * @param content byte array + * @param jsonPointer location of data to be extracted + * @return byte array + */ + byte[] extract(byte[] content, String jsonPointer); + + /** + * Deserializes the content and binds it to the target data type. + * For data type {@link ContentType#JSON}, the byte array is the JSON string encoded using the UTF-8 charset. 
+ * + * @param content byte array to deserialize + * @param type target data type + * @return deserialized object + */ + T deserialize(byte[] content, Type type); + + /** + * Deserializes the parsed json node and binds it to the target data type. + * + * @param node parsed json node + * @param clazz class of target data type + * @return deserialized object + */ + default T deserialize(JsonNode node, Class clazz) { + return deserialize(node, (Type) clazz); + } + + /** + * Deserializes the parsed json node and binds it to the target data type. + * + * @param node parsed json node + * @param type target data type + * @return deserialized object + */ + T deserialize(JsonNode node, Type type); + + /** + * Parses the content at json pointer. + * + * @param content VPack or byte encoded JSON string + * @param jsonPointer location of data to be parsed + * @return root of the parsed tree + */ + JsonNode parse(byte[] content, String jsonPointer); + + /** + * Deserializes the content at json pointer and binds it to the target data type. + * For data type {@link ContentType#JSON}, the byte array is the JSON string encoded using the UTF-8 charset. + * + * @param content byte array to deserialize + * @param jsonPointer location of data to be deserialized + * @param clazz class of target data type + * @return deserialized object + */ + default T deserialize(byte[] content, String jsonPointer, Class clazz) { + return deserialize(content, jsonPointer, (Type) clazz); + } + + /** + * Deserializes the content at json pointer and binds it to the target data type. + * For data type {@link ContentType#JSON}, the byte array is the JSON string encoded using the UTF-8 charset. 
+ * + * @param content byte array to deserialize + * @param jsonPointer location of data to be deserialized + * @param type target data type + * @return deserialized object + */ + default T deserialize(byte[] content, String jsonPointer, Type type) { + return deserialize(extract(content, jsonPointer), type); + } + + /** + * Serializes the object into the target data type, using the user serde. + * + * @param value object to serialize + * @return serialized byte array + */ + byte[] serializeUserData(Object value); + + /** + * Serializes each element in the collection using the user serde. + * + * @param value objects to serialize + * @return serialized byte array + */ + byte[] serializeCollectionUserData(Iterable value); + + /** + * Deserializes the content and binds it to the target data type, using the user serde. + * + * @param content byte array to deserialize + * @param clazz class of target data type + * @return deserialized object + */ + T deserializeUserData(byte[] content, Class clazz); + + /** + * Deserializes the content and binds it to the target data type, using the user serde. + * + * @param content byte array to deserialize + * @param clazz class of target data type + * @return deserialized object + */ + T deserializeUserData(byte[] content, JavaType clazz); + + + /** + * @param content byte array to deserialize + * @return whether the content represents a document (i.e. 
it has at least one field name equal to _id, _key, _rev) + */ + boolean isDocument(byte[] content); + + /** + * @return the user serde + */ + ArangoSerde getUserSerde(); +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java new file mode 100644 index 000000000..8bd24ba31 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java @@ -0,0 +1,260 @@ +package com.arangodb.internal.serde; + +import com.arangodb.ArangoDBException; +import com.arangodb.internal.RequestContextHolder; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.datatype.jsonp.JSONPModule; +import jakarta.json.JsonException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.reflect.Type; +import java.nio.charset.StandardCharsets; + +import static com.arangodb.internal.serde.SerdeUtils.checkSupportedJacksonVersion; +import static com.arangodb.internal.serde.SerdeUtils.extractBytes; + +final class InternalSerdeImpl implements InternalSerde { + private static final Logger LOG = LoggerFactory.getLogger(InternalSerdeImpl.class); + + static { + checkSupportedJacksonVersion(); 
+ } + + private final ArangoSerde userSerde; + private final ObjectMapper mapper; + + InternalSerdeImpl(final ObjectMapper mapper, final ArangoSerde userSerde, final Module protocolModule) { + this.mapper = mapper; + this.userSerde = userSerde; + mapper.deactivateDefaultTyping(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + mapper.enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION); + mapper.registerModule(InternalModule.get(this)); + if (protocolModule != null) { + mapper.registerModule(protocolModule); + } + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + mapper.setAnnotationIntrospector(new InternalAnnotationIntrospector( + new UserDataSerializer(this), + new UserDataDeserializer(this) + )); + + // JSON-P datatypes + try { + mapper.registerModule(new JSONPModule()); + } catch (JsonException e) { + LOG.debug("Jakarta JSON-P provider not found, handling of JSON-P datatypes is disabled", e); + } + } + + @Override + public byte[] serialize(final Object value) { + try { + return mapper.writeValueAsBytes(value); + } catch (JsonProcessingException e) { + throw ArangoDBException.of(e); + } + } + + @Override + public T deserialize(byte[] content, Class clazz) { + return deserialize(content, (Type) clazz); + } + + @Override + public String toJsonString(final byte[] content) { + if (content == null) { + return ""; + } + try { + return SerdeUtils.INSTANCE.writeJson(mapper.readTree(content)); + } catch (Exception e) { + return "[Unparsable data]"; + } + } + + @Override + public byte[] extract(final byte[] content, final String jsonPointer) { + if (!jsonPointer.startsWith("/")) { + throw new ArangoDBException("Unsupported JSON pointer: " + jsonPointer); + } + String[] parts = jsonPointer.substring(1).split("/"); + try (JsonParser parser = mapper.getFactory().createParser(content)) { + int match = 0; + int level = 0; + JsonToken token = parser.nextToken(); + if (token != JsonToken.START_OBJECT) { + throw new 
ArangoDBException("Unable to parse token: " + token); + } + while (true) { + token = parser.nextToken(); + if (token == JsonToken.START_OBJECT) { + level++; + } + if (token == JsonToken.END_OBJECT) { + level--; + } + if (token == null || level < match) { + throw new ArangoDBException("Unable to parse JSON pointer: " + jsonPointer); + } + if (token == JsonToken.FIELD_NAME && match == level && parts[match].equals(parser.getText())) { + match++; + if (match == parts.length) { + parser.nextToken(); + return extractBytes(parser); + } + } + } + } catch (IOException e) { + throw ArangoDBException.of(e); + } + } + + @Override + public JsonNode parse(byte[] content, String jsonPointer) { + try { + return mapper.readTree(content).at(jsonPointer); + } catch (IOException e) { + throw ArangoDBException.of(e); + } + } + + @Override + public byte[] serializeUserData(Object value) { + if (value == null) { + return serialize(null); + } + Class clazz = value.getClass(); + if (RawBytes.class.equals(clazz)) { + return ((RawBytes) value).get(); + } else if (RawJson.class.equals(clazz) && JsonFactory.FORMAT_NAME_JSON.equals(mapper.getFactory().getFormatName())) { + return ((RawJson) value).get().getBytes(StandardCharsets.UTF_8); + } else if (SerdeUtils.isManagedClass(clazz)) { + return serialize(value); + } else { + return userSerde.serialize(value); + } + } + + @Override + public byte[] serializeCollectionUserData(Iterable value) { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + try (JsonGenerator gen = mapper.getFactory().createGenerator(os)) { + gen.writeStartArray(); + for (Object o : value) { + gen.writeRawValue(new RawUserDataValue(serializeUserData(o))); + } + gen.writeEndArray(); + gen.flush(); + } catch (IOException e) { + throw ArangoDBException.of(e); + } + return os.toByteArray(); + } + + @Override + public T deserializeUserData(byte[] content, Class clazz) { + if (SerdeUtils.isManagedClass(clazz)) { + return deserialize(content, clazz); + } else { + return 
userSerde.deserialize(content, clazz, RequestContextHolder.INSTANCE.getCtx()); + } + } + + @Override + @SuppressWarnings("unchecked") + public T deserializeUserData(byte[] content, JavaType clazz) { + try { + if (SerdeUtils.isManagedClass(clazz.getRawClass())) { + return mapper.readerFor(clazz).readValue(content); + } else { + return deserializeUserData(content, (Class) clazz.getRawClass()); + } + } catch (IOException e) { + throw ArangoDBException.of(e); + } + } + + @Override + public boolean isDocument(byte[] content) { + try (JsonParser p = mapper.getFactory().createParser(content)) { + if (p.nextToken() != JsonToken.START_OBJECT) { + return false; + } + + int level = 1; + while (level >= 1) { + JsonToken t = p.nextToken(); + if (level == 1 && t == JsonToken.FIELD_NAME) { + String fieldName = p.getText(); + if (fieldName.equals("_id") || fieldName.equals("_key") || fieldName.equals("_rev")) { + return true; + } + } + if (t.isStructStart()) { + level++; + } else if (t.isStructEnd()) { + level--; + } + } + + if (p.currentToken() != JsonToken.END_OBJECT) { + throw new JsonMappingException(p, "Expected END_OBJECT but got " + p.currentToken()); + } + } catch (IOException e) { + throw ArangoDBException.of(e); + } + return false; + } + + @Override + public ArangoSerde getUserSerde() { + return userSerde; + } + + @Override + public T deserialize(final JsonNode node, final Type type) { + try { + return mapper.readerFor(mapper.constructType(type)).readValue(node); + } catch (IOException e) { + throw ArangoDBException.of(e); + } + } + + @Override + @SuppressWarnings("unchecked") + public T deserialize(final byte[] content, final Type type) { + if (content == null || content.length == 0) { + return null; + } + if (RawBytes.class.equals(type)) { + return (T) RawBytes.of(content); + } else if (RawJson.class.equals(type) && JsonFactory.FORMAT_NAME_JSON.equals(mapper.getFactory().getFormatName())) { + return (T) RawJson.of(new String(content, StandardCharsets.UTF_8)); + } else 
{ + try { + return mapper.readerFor(mapper.constructType(type)).readValue(content); + } catch (IOException e) { + throw ArangoDBException.of(e); + } + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerdeProvider.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeProvider.java new file mode 100644 index 000000000..b19d627f0 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeProvider.java @@ -0,0 +1,45 @@ +package com.arangodb.internal.serde; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.ArangoSerdeProvider; +import com.fasterxml.jackson.databind.Module; + +public class InternalSerdeProvider implements ArangoSerdeProvider { + + private final ContentType contentType; + + /** + * @param contentType serialization target data type + */ + public InternalSerdeProvider(final ContentType contentType) { + this.contentType = contentType; + } + + /** + * Creates a new InternalSerde with default settings. + * + * @return the created InternalSerde + */ + @Override + public InternalSerde create() { + return create(null, null); + } + + /** + * Creates a new InternalSerde with default settings. 
+ * + * @param userSerde user serde + * @param protocolModule optional Jackson module to support protocol specific types + * @return the created InternalSerde + */ + public InternalSerde create(ArangoSerde userSerde, Module protocolModule) { + return new InternalSerdeImpl(InternalMapperProvider.of(contentType), userSerde, protocolModule); + } + + @Override + public ContentType getContentType() { + return contentType; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java new file mode 100644 index 000000000..db156dff8 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java @@ -0,0 +1,98 @@ +package com.arangodb.internal.serde; + +import com.arangodb.entity.CollectionType; +import com.arangodb.entity.arangosearch.CollectionLink; +import com.arangodb.entity.arangosearch.FieldLink; +import com.arangodb.internal.ArangoRequestParam; +import com.arangodb.util.RawJson; +import com.arangodb.internal.InternalRequest; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +public final class InternalSerializers { + + static final JsonSerializer RAW_JSON_SERIALIZER = new JsonSerializer() { + @Override + public void serialize(RawJson value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + if (JsonFactory.FORMAT_NAME_JSON.equals(gen.getCodec().getFactory().getFormatName())) { + gen.writeRawValue(new RawUserDataValue(value.get().getBytes(StandardCharsets.UTF_8))); + } else { + try (JsonParser parser = 
SerdeUtils.INSTANCE.getJsonMapper().getFactory().createParser(value.get())) { + parser.nextToken(); + gen.copyCurrentStructure(parser); + } + } + } + }; + static final JsonSerializer REQUEST = new JsonSerializer() { + @Override + public void serialize(InternalRequest value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeStartArray(); + gen.writeNumber(value.getVersion()); + gen.writeNumber(value.getType()); + gen.writeString(Optional.ofNullable(value.getDbName()).orElse(ArangoRequestParam.SYSTEM)); + gen.writeNumber(value.getRequestType().getType()); + gen.writeString(value.getPath()); + gen.writeStartObject(); + for (final Map.Entry entry : value.getQueryParam().entrySet()) { + gen.writeStringField(entry.getKey(), entry.getValue()); + } + gen.writeEndObject(); + gen.writeStartObject(); + for (final Map.Entry entry : value.getHeaderParam().entrySet()) { + gen.writeStringField(entry.getKey(), entry.getValue()); + } + gen.writeEndObject(); + gen.writeEndArray(); + } + }; + static final JsonSerializer COLLECTION_TYPE = new JsonSerializer() { + @Override + public void serialize(CollectionType value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeNumber(value.getType()); + } + }; + + private InternalSerializers() { + } + + public static class CollectionSchemaRuleSerializer extends JsonSerializer { + @Override + public void serialize(String value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeTree(SerdeUtils.INSTANCE.parseJson(value)); + } + } + + public static class FieldLinksSerializer extends JsonSerializer> { + @Override + public void serialize(Collection value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + Map mapLikeValue = new HashMap<>(); + for (FieldLink fl : value) { + mapLikeValue.put(fl.getName(), fl); + } + gen.writeObject(mapLikeValue); + } + } + + public static class CollectionLinksSerializer extends JsonSerializer> { + 
@Override + public void serialize(Collection value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + Map mapLikeValue = new HashMap<>(); + for (CollectionLink cl : value) { + mapLikeValue.put(cl.getName(), cl); + } + gen.writeObject(mapLikeValue); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/JacksonUtils.java b/core/src/main/java/com/arangodb/internal/serde/JacksonUtils.java new file mode 100644 index 000000000..a0db1c8eb --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/JacksonUtils.java @@ -0,0 +1,127 @@ +package com.arangodb.internal.serde; + +import com.arangodb.internal.ShadedProxy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class JacksonUtils { + private static final Logger LOG = LoggerFactory.getLogger(JacksonUtils.class); + + private JacksonUtils() { + } + + public interface Version { + int getMajorVersion(); + + int getMinorVersion(); + + String toString(); + } + + public interface StreamReadConstraints { + + interface Static { + Builder builder(); + } + + interface Builder { + Builder maxNumberLength(final int maxNumLen); + + Builder maxStringLength(int maxStringLen); + + Builder maxNestingDepth(int maxNestingDepth); + + Builder maxNameLength(int maxNameLen); + + Builder maxDocumentLength(long maxDocLen); + + StreamReadConstraints build(); + } + } + + public interface StreamWriteConstraints { + interface Static { + Builder builder(); + } + + interface Builder { + Builder maxNestingDepth(int maxNestingDepth); + + StreamWriteConstraints build(); + } + } + + public interface JsonFactory { + Version version(); + + @SuppressWarnings("UnusedReturnValue") + JsonFactory setStreamReadConstraints(StreamReadConstraints src); + + @SuppressWarnings("UnusedReturnValue") + JsonFactory setStreamWriteConstraints(StreamWriteConstraints swc); + } + + /** + * Configure JsonFactory with permissive StreamReadConstraints and StreamWriteConstraints. 
+ * It uses reflection to avoid compilation errors with older Jackson versions. + * It uses dynamic package names to be compatible with shaded Jackson. + * + * @param jf JsonFactory to configure + */ + public static void tryConfigureJsonFactory(Object jf) { + try { + configureJsonFactory(jf); + } catch (Throwable t) { + LOG.warn("Got exception while configuring JsonFactory, skipping...", t); + } + } + + private static void configureJsonFactory(Object jf) throws Exception { + JsonFactory proxy = ShadedProxy.of(JsonFactory.class, jf); + Version version = proxy.version(); + LOG.debug("Detected Jackson version: {}", version); + + // get pkg name dynamically, to support shaded Jackson + String basePkg = jf.getClass().getPackage().getName(); + + if (isAtLeastVersion(version, 2, 15)) { + Class srcClass = Class.forName(basePkg + "." + StreamReadConstraints.class.getSimpleName()); + StreamReadConstraints.Builder builder = ShadedProxy.of(StreamReadConstraints.Static.class, srcClass) + .builder() + .maxNumberLength(Integer.MAX_VALUE) + .maxStringLength(Integer.MAX_VALUE) + .maxNestingDepth(Integer.MAX_VALUE); + if (isAtLeastVersion(version, 2, 16)) { + builder = builder + .maxNameLength(Integer.MAX_VALUE) + .maxDocumentLength(Long.MAX_VALUE); + } else { + LOG.debug("Skipping configuring StreamReadConstraints maxNameLength"); + LOG.debug("Skipping configuring StreamReadConstraints maxDocumentLength"); + } + proxy.setStreamReadConstraints(builder.build()); + } else { + LOG.debug("Skipping configuring StreamReadConstraints"); + } + + if (isAtLeastVersion(version, 2, 16)) { + LOG.debug("Configuring StreamWriteConstraints ..."); + Class swcClass = Class.forName(basePkg + "." 
+ StreamWriteConstraints.class.getSimpleName()); + StreamWriteConstraints swc = ShadedProxy.of(StreamWriteConstraints.Static.class, swcClass) + .builder() + .maxNestingDepth(Integer.MAX_VALUE) + .build(); + proxy.setStreamWriteConstraints(swc); + } else { + LOG.debug("Skipping configuring StreamWriteConstraints"); + } + } + + @SuppressWarnings("SameParameterValue") + private static boolean isAtLeastVersion(Version version, int major, int minor) { + int currentMajor = version.getMajorVersion(); + int currentMinor = version.getMinorVersion(); + return currentMajor > major || (currentMajor == major && currentMinor >= minor); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java b/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java new file mode 100644 index 000000000..ca650569d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java @@ -0,0 +1,69 @@ +package com.arangodb.internal.serde; + +import com.arangodb.entity.ErrorEntity; +import com.arangodb.entity.MultiDocumentEntity; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.BeanProperty; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.deser.ContextualDeserializer; + +import java.io.IOException; + +public class MultiDocumentEntityDeserializer extends JsonDeserializer> implements ContextualDeserializer { + private final JavaType containedType; + private final InternalSerde serde; + + MultiDocumentEntityDeserializer(InternalSerde serde) { + this(serde, null); + } + + MultiDocumentEntityDeserializer(InternalSerde serde, JavaType containedType) { + this.serde = serde; + this.containedType = 
containedType; + } + + @Override + public MultiDocumentEntity deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + MultiDocumentEntity multiDocument = new MultiDocumentEntity<>(); + + // silent=true returns an empty object + if (p.currentToken() == JsonToken.START_OBJECT) { + if (p.nextToken() == JsonToken.END_OBJECT) { + return multiDocument; + } else { + throw new JsonMappingException(p, "Unexpected token sequence: START_OBJECT, " + p.currentToken()); + } + } + + if (p.currentToken() != JsonToken.START_ARRAY) { + throw new JsonMappingException(p, "Expected START_ARRAY but got " + p.currentToken()); + } + p.nextToken(); + while (p.currentToken() != JsonToken.END_ARRAY) { + if (p.currentToken() != JsonToken.START_OBJECT) { + throw new JsonMappingException(p, "Expected START_OBJECT but got " + p.currentToken()); + } + byte[] element = SerdeUtils.extractBytes(p); + if (serde.isDocument(element)) { + Object d = serde.deserializeUserData(element, containedType); + multiDocument.getDocuments().add(d); + multiDocument.getDocumentsAndErrors().add(d); + } else { + ErrorEntity e = serde.deserialize(element, ErrorEntity.class); + multiDocument.getErrors().add(e); + multiDocument.getDocumentsAndErrors().add(e); + } + p.nextToken(); // END_OBJECT + } + return multiDocument; + } + + @Override + public JsonDeserializer createContextual(DeserializationContext ctxt, BeanProperty property) { + return new MultiDocumentEntityDeserializer(serde, ctxt.getContextualType().containedType(0)); + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java b/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java new file mode 100644 index 000000000..4bfde90f2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java @@ -0,0 +1,92 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.core.SerializableString; + +import java.io.IOException; +import java.io.OutputStream; +import 
java.nio.ByteBuffer; + +class RawUserDataValue implements SerializableString { + private final byte[] data; + + RawUserDataValue(byte[] data) { + this.data = data; + } + + @Override + public String getValue() { + throw new UnsupportedOperationException(); + } + + @Override + public int charLength() { + throw new UnsupportedOperationException(); + } + + @Override + public char[] asQuotedChars() { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] asUnquotedUTF8() { + return data; + } + + @Override + public byte[] asQuotedUTF8() { + throw new UnsupportedOperationException(); + } + + @Override + public int appendQuotedUTF8(byte[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int appendQuoted(char[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int appendUnquotedUTF8(byte[] buffer, int offset) { + final int length = data.length; + if ((offset + length) > buffer.length) { + return -1; + } + System.arraycopy(data, 0, buffer, offset, length); + return length; + } + + @Override + public int appendUnquoted(char[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int writeQuotedUTF8(OutputStream out) { + throw new UnsupportedOperationException(); + } + + @Override + public int writeUnquotedUTF8(OutputStream out) throws IOException { + final int length = data.length; + out.write(data, 0, length); + return length; + } + + @Override + public int putQuotedUTF8(ByteBuffer buffer) { + throw new UnsupportedOperationException(); + } + + @Override + public int putUnquotedUTF8(ByteBuffer buffer) { + final int length = data.length; + if (length > buffer.remaining()) { + return -1; + } + buffer.put(data, 0, length); + return length; + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java b/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java new file mode 100644 index 
000000000..7f652a745 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java @@ -0,0 +1,147 @@ +package com.arangodb.internal.serde; + +import com.arangodb.ArangoDBException; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.BaseEdgeDocument; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.type.TypeFactory; +import jakarta.json.JsonValue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public enum SerdeUtils { + INSTANCE; + + private static final Logger LOGGER = LoggerFactory.getLogger(SerdeUtils.class); + + private final ObjectMapper jsonMapper = new ObjectMapper(); + + public static Type constructListType(Class clazz) { + return TypeFactory.defaultInstance().constructCollectionType(List.class, clazz); + } + + public static Type constructParametricType(Class rawType, Type... 
rawArgs) { + if (rawArgs == null || rawArgs.length == 0 || rawArgs[0] == null) { + return rawType; + } else { + return new InternalParameterizedType(rawType, rawArgs); + } + } + + public static Type convertToType(final JavaType javaType) { + List args = new ArrayList<>(); + for (JavaType it : javaType.getBindings().getTypeParameters()) { + Type type = convertToType(it); + args.add(type); + } + return constructParametricType(javaType.getRawClass(), args.toArray(new Type[0])); + } + + static void checkSupportedJacksonVersion() { + Arrays.asList( + com.fasterxml.jackson.databind.cfg.PackageVersion.VERSION, + com.fasterxml.jackson.core.json.PackageVersion.VERSION + ).forEach(version -> { + int major = version.getMajorVersion(); + int minor = version.getMinorVersion(); + if (major != 2 || minor < 10 || minor > 19) { + LOGGER.warn("Unsupported Jackson version: {}", version); + } + }); + } + + public ObjectMapper getJsonMapper() { + return jsonMapper; + } + + /** + * Parse a JSON string. + * + * @param json JSON string to parse + * @return root of the parsed tree + */ + public JsonNode parseJson(final String json) { + try { + return jsonMapper.readTree(json); + } catch (JsonProcessingException e) { + throw ArangoDBException.of(e); + } + } + + /** + * @param data JsonNode + * @return JSON string + */ + public String writeJson(final JsonNode data) { + try { + return jsonMapper.writeValueAsString(data); + } catch (JsonProcessingException e) { + throw ArangoDBException.of(e); + } + } + + /** + * Extract raw bytes for the current JSON (or VPACK) node + * + * @param parser JsonParser with current token pointing to the node to extract + * @return byte array + */ + @SuppressWarnings("deprecation") + public static byte[] extractBytes(JsonParser parser) throws IOException { + JsonToken t = parser.currentToken(); + if (t.isStructEnd() || t == JsonToken.FIELD_NAME) { + throw new ArangoDBException("Unexpected token: " + t); + } + byte[] data = (byte[]) 
parser.getTokenLocation().getSourceRef(); + int start = (int) parser.getTokenLocation().getByteOffset(); + int end = (int) parser.getCurrentLocation().getByteOffset(); + if (t.isStructStart()) { + int open = 1; + while (open > 0) { + t = parser.nextToken(); + if (t.isStructStart()) { + open++; + } else if (t.isStructEnd()) { + open--; + } + } + } + parser.finishToken(); + if (JsonFactory.FORMAT_NAME_JSON.equals(parser.getCodec().getFactory().getFormatName())) { + end = (int) parser.getCurrentLocation().getByteOffset(); + } + return Arrays.copyOfRange(data, start, end); + } + + public static boolean isManagedClass(Class clazz) { + return JsonNode.class.isAssignableFrom(clazz) || // jackson datatypes + JsonValue.class.isAssignableFrom(clazz) || // JSON-B datatypes + RawJson.class.equals(clazz) || + RawBytes.class.equals(clazz) || + BaseDocument.class.equals(clazz) || + BaseEdgeDocument.class.equals(clazz) || + isEntityClass(clazz); + } + + private static boolean isEntityClass(Class clazz) { + Package pkg = clazz.getPackage(); + if (pkg == null) { + return false; + } + return pkg.getName().startsWith("com.arangodb.entity"); + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/UserData.java b/core/src/main/java/com/arangodb/internal/serde/UserData.java new file mode 100644 index 000000000..da9f7dc86 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/UserData.java @@ -0,0 +1,14 @@ +package com.arangodb.internal.serde; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Marker annotation for fields that need to be serialized/deserialized using the user serde. 
+ */ +@Target({ElementType.METHOD, ElementType.FIELD}) +@Retention(RetentionPolicy.RUNTIME) +public @interface UserData { +} diff --git a/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java b/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java new file mode 100644 index 000000000..ecb8c83f3 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java @@ -0,0 +1,50 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.BeanProperty; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.deser.ContextualDeserializer; +import com.fasterxml.jackson.databind.jsontype.TypeDeserializer; + +import java.io.IOException; +import java.lang.reflect.Type; + +import static com.arangodb.internal.serde.SerdeUtils.convertToType; + +class UserDataDeserializer extends JsonDeserializer implements ContextualDeserializer { + private final Type targetType; + private final InternalSerde serde; + + UserDataDeserializer(final InternalSerde serde) { + targetType = null; + this.serde = serde; + } + + private UserDataDeserializer(final JavaType targetType, final InternalSerde serde) { + this.targetType = convertToType(targetType); + this.serde = serde; + } + + @Override + public Object deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + Class clazz = (Class) targetType; + if (SerdeUtils.isManagedClass(clazz)) { + return p.readValueAs(clazz); + } else { + return serde.deserializeUserData(SerdeUtils.extractBytes(p), clazz); + } + } + + @Override + public Object deserializeWithType(JsonParser p, DeserializationContext ctxt, TypeDeserializer typeDeserializer) throws IOException { + return deserialize(p, ctxt); + } + + @Override + public JsonDeserializer 
createContextual(DeserializationContext ctxt, BeanProperty property) { + return new UserDataDeserializer(ctxt.getContextualType(), serde); + } + +} diff --git a/core/src/main/java/com/arangodb/internal/serde/UserDataInside.java b/core/src/main/java/com/arangodb/internal/serde/UserDataInside.java new file mode 100644 index 000000000..cdfd83b43 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/UserDataInside.java @@ -0,0 +1,19 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Marker annotation for collections or map fields whose values need to be serialized/deserialized using the user serde. + */ +@Target({ElementType.METHOD, ElementType.FIELD}) +@Retention(RetentionPolicy.RUNTIME) +@JsonInclude(value = JsonInclude.Include.NON_NULL) +@JacksonAnnotationsInside +public @interface UserDataInside { +} diff --git a/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java b/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java new file mode 100644 index 000000000..501998da4 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java @@ -0,0 +1,25 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; + +class UserDataSerializer extends JsonSerializer { + private final InternalSerde serde; + + UserDataSerializer(InternalSerde serde) { + this.serde = serde; + } + + @Override + public void serialize(Object value, JsonGenerator gen, SerializerProvider serializers) throws IOException { 
+ if (value != null && JsonNode.class.isAssignableFrom(value.getClass())) { + gen.writeTree((JsonNode) value); + } else { + gen.writeRawValue(new RawUserDataValue(serde.serializeUserData(value))); + } + } +} diff --git a/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java b/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java new file mode 100644 index 000000000..d3b1a223a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java @@ -0,0 +1,45 @@ +package com.arangodb.internal.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayDeque; +import java.util.Queue; +import java.util.concurrent.*; + +public class AsyncQueue { + private static final Logger LOGGER = LoggerFactory.getLogger(AsyncQueue.class); + private final Queue> requests = new ConcurrentLinkedQueue<>(); + private final Queue offers = new ArrayDeque<>(); + + public synchronized CompletableFuture poll() { + LOGGER.trace("poll()"); + T o = offers.poll(); + if (o != null) { + LOGGER.trace("poll(): short-circuit: {}", o); + return CompletableFuture.completedFuture(o); + } + CompletableFuture r = new CompletableFuture<>(); + LOGGER.trace("poll(): enqueue request: {}", r); + requests.add(r); + return r; + } + + public void offer(T o) { + LOGGER.trace("offer({})", o); + CompletableFuture r = requests.poll(); + if (r == null) { + synchronized (this) { + r = requests.poll(); + if (r == null) { + LOGGER.trace("offer({}): enqueue", o); + offers.add(o); + } + } + } + if (r != null) { + LOGGER.trace("offer({}): short-circuit: {}", o, r); + r.complete(o); + } + } +} diff --git a/core/src/main/java/com/arangodb/internal/util/DocumentUtil.java b/core/src/main/java/com/arangodb/internal/util/DocumentUtil.java new file mode 100644 index 000000000..239ad7371 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/DocumentUtil.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.util; + +import com.arangodb.ArangoDBException; + +import java.util.regex.Pattern; + +/** + * @author Mark Vollmary + */ +public final class DocumentUtil { + + public static final String REGEX_KEY = "[^/]+"; + public static final String REGEX_ID = "[^/]+/[^/]+"; + private static final String SLASH = "/"; + + private DocumentUtil() { + super(); + } + + public static void validateIndexId(final String id) { + validateName("index id", REGEX_ID, id); + } + + public static void validateDocumentKey(final String key) { + validateName("document key", REGEX_KEY, key); + } + + public static void validateDocumentId(final String id) { + validateName("document id", REGEX_ID, id); + } + + public static String createDocumentHandle(final String collection, final String key) { + validateDocumentKey(key); + return collection + SLASH + key; + } + + private static void validateName(final String type, final String regex, final CharSequence name) { + if (!Pattern.matches(regex, name)) { + throw new ArangoDBException(String.format("%s %s is not valid.", type, name)); + } + } +} diff --git a/core/src/main/java/com/arangodb/internal/util/EncodeUtils.java b/core/src/main/java/com/arangodb/internal/util/EncodeUtils.java new file mode 100644 index 000000000..11740abf2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/EncodeUtils.java 
@@ -0,0 +1,55 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.util; + +import com.arangodb.ArangoDBException; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; + +public class EncodeUtils { + private EncodeUtils() { + } + + /** + * Encodes a string by replacing each instance of certain characters by one, two, three, or four escape sequences + * representing the UTF-8 encoding of the character. + * It behaves the same as Javascript encodeURIComponent(). 
+ * + * @param value string to encode + * @return encoded string + */ + public static String encodeURIComponent(final String value) { + try { + return URLEncoder.encode(value, StandardCharsets.UTF_8.name()) + .replace("+", "%20") + .replace("%21", "!") + .replace("%27", "'") + .replace("%28", "(") + .replace("%29", ")") + .replace("%7E", "~"); + } catch (UnsupportedEncodingException e) { + throw ArangoDBException.of(e); + } + } + +} diff --git a/core/src/main/java/com/arangodb/internal/util/HostUtils.java b/core/src/main/java/com/arangodb/internal/util/HostUtils.java new file mode 100644 index 000000000..591ed91af --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/HostUtils.java @@ -0,0 +1,53 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.util; + +import com.arangodb.config.HostDescription; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.*; + +/** + * @author Mark Vollmary + */ +public final class HostUtils { + + private HostUtils() { + super(); + } + + public static HostDescription createFromLocation(final String location) { + final HostDescription host; + if (location != null) { + final String[] tmp = location.replaceAll(".*://", "").replaceAll("/.*", "").split(":"); + host = tmp.length == 2 ? 
new HostDescription(tmp[0], Integer.parseInt(tmp[1])) : null; + } else { + host = null; + } + return host; + } + + public static Host createHost( + final HostDescription description, + final ArangoConfig config, + final ConnectionFactory factory) { + return new HostImpl(new ConnectionPoolImpl(description, config, factory), description); + } +} diff --git a/core/src/main/java/com/arangodb/internal/util/RequestUtils.java b/core/src/main/java/com/arangodb/internal/util/RequestUtils.java new file mode 100644 index 000000000..7074cfc3b --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/RequestUtils.java @@ -0,0 +1,52 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.util; + +import com.arangodb.internal.net.AccessType; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.RequestType; + +/** + * @author Mark Vollmary + */ +public final class RequestUtils { + + public static final String HEADER_ALLOW_DIRTY_READ = "x-arango-allow-dirty-read"; + + private RequestUtils() { + super(); + } + + public static InternalRequest allowDirtyRead(final InternalRequest request) { + return request.putHeaderParam(HEADER_ALLOW_DIRTY_READ, "true"); + } + + public static AccessType determineAccessType(final InternalRequest request) { + if (request.containsHeaderParam(HEADER_ALLOW_DIRTY_READ)) { + return AccessType.DIRTY_READ; + } + if (request.getRequestType() == RequestType.GET) { + return AccessType.READ; + } + return AccessType.WRITE; + } + +} diff --git a/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java b/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java new file mode 100644 index 000000000..57b69c319 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java @@ -0,0 +1,104 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.util; + +import com.arangodb.ArangoDBException; +import com.arangodb.entity.ErrorEntity; +import com.arangodb.internal.ArangoErrors; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.net.ArangoDBRedirectException; +import com.arangodb.internal.net.ArangoDBUnavailableException; +import com.arangodb.internal.serde.InternalSerde; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeoutException; + +/** + * @author Mark Vollmary + */ +public final class ResponseUtils { + + private static final int ERROR_STATUS = 300; + private static final int ERROR_INTERNAL = 503; + private static final String HEADER_ENDPOINT = "x-arango-endpoint"; + private static final String CONTENT_TYPE = "content-type"; + private static final String TEXT_PLAIN = "text/plain"; + + private ResponseUtils() { + super(); + } + + public static ArangoDBException translateError(InternalSerde serde, InternalResponse response) { + final int responseCode = response.getResponseCode(); + if (responseCode < ERROR_STATUS) { + return null; + } + if (responseCode == ERROR_INTERNAL && response.containsMeta(HEADER_ENDPOINT)) { + return new ArangoDBRedirectException(String.format("Response Code: %s", responseCode), + response.getMeta(HEADER_ENDPOINT)); + } + + byte[] body = response.getBody(); + if (body == null) { + return new ArangoDBException(String.format("Response Code: %s", responseCode), responseCode); + } + + if (isTextPlain(response)) { + String payload = new String(body, getContentTypeCharset(response)); + return new ArangoDBException("Response Code: " + responseCode + "[" + payload + "]", responseCode); + } + + ErrorEntity errorEntity; + try { + errorEntity = serde.deserialize(body, ErrorEntity.class); + } catch (Exception e) { + ArangoDBException adbEx = new ArangoDBException("Response Code: " + responseCode + + " 
[Unparsable data] Response: " + response, responseCode); + adbEx.addSuppressed(e); + return adbEx; + } + + if (errorEntity.getCode() == ERROR_INTERNAL && errorEntity.getErrorNum() == ERROR_INTERNAL) { + return ArangoDBUnavailableException.from(errorEntity); + } + ArangoDBException e = new ArangoDBException(errorEntity); + if (ArangoErrors.QUEUE_TIME_VIOLATED.equals(e.getErrorNum())) { + return ArangoDBException.of(new TimeoutException().initCause(e)); + } + return e; + } + + private static boolean isTextPlain(InternalResponse response) { + String contentType = response.getMeta(CONTENT_TYPE); + return contentType != null && contentType.startsWith(TEXT_PLAIN); + } + + private static Charset getContentTypeCharset(InternalResponse response) { + String contentType = response.getMeta(CONTENT_TYPE); + int paramIdx = contentType.indexOf("charset="); + if (paramIdx == -1) { + return StandardCharsets.UTF_8; + } + return Charset.forName(contentType.substring(paramIdx + 8)); + } + +} diff --git a/core/src/main/java/com/arangodb/model/AbstractMDIndexOptions.java b/core/src/main/java/com/arangodb/model/AbstractMDIndexOptions.java new file mode 100644 index 000000000..159a781e7 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/AbstractMDIndexOptions.java @@ -0,0 +1,142 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.arch.NoRawTypesInspection; +import com.arangodb.entity.IndexType; + + +/** + * @author Michele Rastelli + * @since ArangoDB 3.12 + */ +@NoRawTypesInspection +public abstract class AbstractMDIndexOptions> extends IndexOptions { + + private Iterable fields; + private Boolean unique; + private MDIFieldValueTypes fieldValueTypes; + private Boolean estimates; + private Boolean sparse; + private Iterable storedValues; + + + protected AbstractMDIndexOptions() { + super(); + } + + public abstract IndexType getType(); + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute names used for each dimension. Array expansions are not allowed. + * @return options + */ + T fields(final Iterable fields) { + this.fields = fields; + return getThis(); + } + + public Boolean getUnique() { + return unique; + } + + /** + * @param unique if true, then create a unique index + * @return options + */ + public T unique(final Boolean unique) { + this.unique = unique; + return getThis(); + } + + public MDIFieldValueTypes getFieldValueTypes() { + return fieldValueTypes; + } + + /** + * @param fieldValueTypes must be {@link MDIFieldValueTypes#DOUBLE}, currently only doubles are supported as values. + * @return options + */ + public T fieldValueTypes(final MDIFieldValueTypes fieldValueTypes) { + this.fieldValueTypes = fieldValueTypes; + return getThis(); + } + + public Boolean getEstimates() { + return estimates; + } + + /** + * @param estimates controls whether index selectivity estimates are maintained for the index. Not maintaining index + * selectivity estimates can have a slightly positive impact on write performance. 
+ * The downside of turning off index selectivity estimates is that the query optimizer is not able + * to determine the usefulness of different competing indexes in AQL queries when there are + * multiple candidate indexes to choose from. + * The estimates attribute is optional and defaults to true if not set. + * It cannot be disabled for non-unique multi-dimensional indexes because they have a fixed + * selectivity estimate of 1. + * @return options + */ + public T estimates(final Boolean estimates) { + this.estimates = estimates; + return getThis(); + } + + public Boolean getSparse() { + return sparse; + } + + /** + * @param sparse if true, then create a sparse index + * @return options + */ + public T sparse(final Boolean sparse) { + this.sparse = sparse; + return getThis(); + } + + public Iterable getStoredValues() { + return storedValues; + } + + /** + * @param storedValues can contain an array of paths to additional attributes to store in the index. + * These additional attributes cannot be used for index lookups or for sorting, but they can be + * used for projections. This allows an index to fully cover more queries and avoid extra + * document lookups. + * You can have the same attributes in storedValues and fields as the attributes in fields + * cannot be used for projections, but you can also store additional attributes that are not + * listed in fields. + * Attributes in storedValues cannot overlap with the attributes specified in prefixFields. + * Non-existing attributes are stored as null values inside storedValues. + * The maximum number of attributes in storedValues is 32. 
+ * @return options + */ + public T storedValues(final Iterable storedValues) { + this.storedValues = storedValues; + return getThis(); + } + +} diff --git a/core/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java b/core/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java new file mode 100644 index 000000000..7357ca898 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class AqlFunctionCreateOptions { + + private String name; + private String code; + private Boolean isDeterministic; + + public AqlFunctionCreateOptions() { + super(); + } + + /** + * @param name the fully qualified name of the user functions + * @return options + */ + AqlFunctionCreateOptions name(final String name) { + this.name = name; + return this; + } + + public String getName() { + return name; + } + + /** + * @param code a string representation of the function body + * @return options + */ + AqlFunctionCreateOptions code(final String code) { + this.code = code; + return this; + } + + public String getCode() { + return code; + } + + /** + * @param isDeterministic an optional boolean value to indicate that the function results are fully deterministic + * (function + * return value solely depends on the input value and return value is the same for + * repeated calls with + * same input) + * @return options + */ + public AqlFunctionCreateOptions isDeterministic(final Boolean isDeterministic) { + this.isDeterministic = isDeterministic; + return this; + } + + public Boolean getIsDeterministic() { + return isDeterministic; + } + +} diff --git a/src/main/java/com/arangodb/model/CollectionsReadOptions.java b/core/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java similarity index 53% rename from src/main/java/com/arangodb/model/CollectionsReadOptions.java rename to core/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java index 3b32a8d99..b3d168a38 100644 --- a/src/main/java/com/arangodb/model/CollectionsReadOptions.java +++ b/core/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java @@ -1,51 +1,49 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CollectionsReadOptions { - - private Boolean excludeSystem; - - public CollectionsReadOptions() { - super(); - } - - public Boolean getExcludeSystem() { - return excludeSystem; - } - - /** - * @param excludeSystem - * Whether or not system collections should be excluded from the result. - * @return options - */ - public CollectionsReadOptions excludeSystem(final Boolean excludeSystem) { - this.excludeSystem = excludeSystem; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class AqlFunctionDeleteOptions { + + private Boolean group; + + public AqlFunctionDeleteOptions() { + super(); + } + + public Boolean getGroup() { + return group; + } + + /** + * @param group If set to true, then the function name provided in name is treated as a namespace prefix, and all + * functions in the specified namespace will be deleted. If set to false, the function name provided in + * name must be fully qualified, including any namespaces. + * @return options + */ + public AqlFunctionDeleteOptions group(final Boolean group) { + this.group = group; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java b/core/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java similarity index 62% rename from src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java rename to core/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java index 819c71aa5..9f845ff4a 100644 --- a/src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java +++ b/core/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java @@ -1,49 +1,47 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - */ -public class VertexCollectionCreateOptions { - - private String collection; - - public VertexCollectionCreateOptions() { - super(); - } - - protected String getCollection() { - return collection; - } - - /** - * @param collection - * The name of the collection - * @return options - */ - protected VertexCollectionCreateOptions collection(final String collection) { - this.collection = collection; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class AqlFunctionGetOptions { + + private String namespace; + + public AqlFunctionGetOptions() { + super(); + } + + public String getNamespace() { + return namespace; + } + + /** + * @param namespace Returns all registered AQL user functions from namespace + * @return options + */ + public AqlFunctionGetOptions namespace(final String namespace) { + this.namespace = namespace; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java new file mode 100644 index 000000000..ac4d1d161 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java @@ -0,0 +1,152 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.ArangoDatabase; +import com.arangodb.internal.serde.UserDataInside; + +import java.util.Collection; +import java.util.Map; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + * + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ +@Deprecated +public final class AqlQueryExplainOptions { + + private Map bindVars; + private String query; + private Options options; + + public AqlQueryExplainOptions() { + super(); + } + + @UserDataInside + public Map getBindVars() { + return bindVars; + } + + /** + * @param bindVars key/value pairs representing the bind parameters + * @return options + */ + AqlQueryExplainOptions bindVars(final Map bindVars) { + this.bindVars = bindVars; + return this; + } + + public String getQuery() { + return query; + } + + /** + * @param query the query which you want explained + * @return options + */ + AqlQueryExplainOptions query(final String query) { + this.query = query; + return this; + } + + public Integer getMaxNumberOfPlans() { + return getOptions().maxNumberOfPlans; + } + + /** + * @param maxNumberOfPlans an optional maximum number of plans that the optimizer is allowed to generate. Setting + * this attribute + * to a low value allows to put a cap on the amount of work the optimizer does. + * @return options + */ + public AqlQueryExplainOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { + getOptions().maxNumberOfPlans = maxNumberOfPlans; + return this; + } + + public Boolean getAllPlans() { + return getOptions().allPlans; + } + + /** + * @param allPlans if set to true, all possible execution plans will be returned. The default is false, meaning + * only the + * optimal plan will be returned. 
+ * @return options + */ + public AqlQueryExplainOptions allPlans(final Boolean allPlans) { + getOptions().allPlans = allPlans; + return this; + } + + public Collection getRules() { + return getOptions().getOptimizer().rules; + } + + /** + * @param rules an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling + * the optimizer to include or exclude specific rules. + * @return options + */ + public AqlQueryExplainOptions rules(final Collection rules) { + getOptions().getOptimizer().rules = rules; + return this; + } + + public Options getOptions() { + if (options == null) { + options = new Options(); + } + return options; + } + + public static final class Options { + private Optimizer optimizer; + private Integer maxNumberOfPlans; + private Boolean allPlans; + + public Optimizer getOptimizer() { + if (optimizer == null) { + optimizer = new Optimizer(); + } + return optimizer; + } + + public Integer getMaxNumberOfPlans() { + return maxNumberOfPlans; + } + + public Boolean getAllPlans() { + return allPlans; + } + } + + public static final class Optimizer { + private Collection rules; + + public Collection getRules() { + return rules; + } + } +} diff --git a/core/src/main/java/com/arangodb/model/AqlQueryOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java new file mode 100644 index 000000000..ccf217a16 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java @@ -0,0 +1,1032 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.ArangoCursor; +import com.arangodb.internal.serde.UserDataInside; +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.util.*; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class AqlQueryOptions extends TransactionalOptions implements Cloneable { + + private Boolean allowDirtyRead; + private Integer batchSize; + private Map bindVars; + private Boolean cache; + private Boolean count; + private Long memoryLimit; + private Options options; + private String query; + private Integer ttl; + + @Override + AqlQueryOptions getThis() { + return this; + } + + public Boolean getAllowDirtyRead() { + return allowDirtyRead; + } + + /** + * Sets the header {@code x-arango-allow-dirty-read} to {@code true} to allow the Coordinator to ask any shard + * replica for the data, not only the shard leader. This may result in β€œdirty reads”. + *

+ * The header is ignored if this operation is part of a Stream Transaction + * ({@link AqlQueryOptions#streamTransactionId(String)}). The header set when creating the transaction decides + * about dirty reads for the entire transaction, not the individual read operations. + * + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return this + * @see API + * Documentation + */ + public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) { + this.allowDirtyRead = allowDirtyRead; + return this; + } + + public Integer getBatchSize() { + return batchSize; + } + + /** + * @param batchSize maximum number of result documents to be transferred from the server to the client in one + * roundtrip. If this attribute is not set, a server-controlled default value will be used. + * A batchSize value of 0 is disallowed. + * @return this + */ + public AqlQueryOptions batchSize(final Integer batchSize) { + this.batchSize = batchSize; + return this; + } + + @UserDataInside + public Map getBindVars() { + return bindVars; + } + + /** + * @param bindVars A map with key/value pairs representing the bind parameters. For a bind variable {@code @var} in + * the query, specify the value using an attribute with the name {@code var}. For a collection bind + * variable {@code @@coll}, use {@code @coll} as the attribute name. + * @return this + */ + AqlQueryOptions bindVars(final Map bindVars) { + this.bindVars = bindVars; + return this; + } + + public Boolean getCache() { + return cache; + } + + /** + * @param cache flag to determine whether the AQL query results cache shall be used. If set to false, then any + * query cache lookup will be skipped for the query. If set to true, it will lead to the query cache + * being checked for the query if the query cache mode is either on or demand. 
+ * @return this + */ + public AqlQueryOptions cache(final Boolean cache) { + this.cache = cache; + return this; + } + + public Boolean getCount() { + return count; + } + + /** + * @param count indicates whether the number of documents in the result set should be returned and made accessible + * via {@link ArangoCursor#getCount()}. Calculating the {@code count} attribute might have a + * performance impact for some queries in the future so this option is turned off by default, and + * {@code count} is only returned when requested. + * @return this + */ + public AqlQueryOptions count(final Boolean count) { + this.count = count; + return this; + } + + public Long getMemoryLimit() { + return memoryLimit; + } + + /** + * @param memoryLimit the maximum number of memory (measured in bytes) that the query is allowed to use. If set, + * then the query will fail with error {@code resource limit exceeded} in case it allocates too + * much memory. A value of {@code 0} indicates that there is no memory limit. + * @return this + * @since ArangoDB 3.1.0 + */ + public AqlQueryOptions memoryLimit(final Long memoryLimit) { + this.memoryLimit = memoryLimit; + return this; + } + + public Options getOptions() { + if (options == null) { + options = new Options(); + } + return options; + } + + /** + * @param options extra options for the query + * @return this + */ + public AqlQueryOptions options(final Options options) { + this.options = options; + return this; + } + + public String getQuery() { + return query; + } + + /** + * @param query the query to be executed + * @return this + */ + public AqlQueryOptions query(final String query) { + this.query = query; + return this; + } + + public Integer getTtl() { + return ttl; + } + + /** + * @param ttl The time-to-live for the cursor (in seconds). If the result set is small enough (less than or equal + * to batchSize) then results are returned right away. 
Otherwise, they are stored in memory and will be + * accessible via the cursor with respect to the ttl. The cursor will be removed on the server + * automatically after the specified amount of time. This is useful to ensure garbage collection of + * cursors that are not fully fetched by clients. + *

+ * If not set, a server-defined value will be used (default: 30 seconds). + *

+ * The time-to-live is renewed upon every access to the cursor. + * @return this + */ + public AqlQueryOptions ttl(final Integer ttl) { + this.ttl = ttl; + return this; + } + + @Override + public AqlQueryOptions clone() { + try { + AqlQueryOptions clone = (AqlQueryOptions) super.clone(); + clone.bindVars = bindVars != null ? new HashMap<>(bindVars) : null; + clone.options = options != null ? options.clone() : null; + return clone; + } catch (CloneNotSupportedException e) { + throw new AssertionError(); + } + } + + public static final class Options implements Cloneable { + private Map customOptions; + private Boolean allPlans; + private Boolean allowDirtyReads; + private Boolean allowRetry; + private Boolean failOnWarning; + private Boolean fillBlockCache; + private String forceOneShardAttributeValue; + private Boolean fullCount; + private Long intermediateCommitCount; + private Long intermediateCommitSize; + private Integer maxDNFConditionMembers; + private Integer maxNodesPerCallstack; + private Integer maxNumberOfPlans; + private Double maxRuntime; + private Long maxTransactionSize; + private Long maxWarningCount; + private Optimizer optimizer; + private Boolean profile; + private Double satelliteSyncWait; + private Collection shardIds; + private Boolean skipInaccessibleCollections; + private Long spillOverThresholdMemoryUsage; + private Long spillOverThresholdNumRows; + private Boolean stream; + private Boolean usePlanCache; + + @JsonInclude + @JsonAnyGetter + public Map getCustomOptions() { + if (customOptions == null) { + customOptions = new HashMap<>(); + } + return customOptions; + } + + public void setCustomOption(String key, Object value) { + getCustomOptions().put(key, value); + } + + public Boolean getAllPlans() { + return allPlans; + } + + public Boolean getAllowDirtyReads() { + return allowDirtyReads; + } + + public Boolean getAllowRetry() { + return allowRetry; + } + + public Boolean getFailOnWarning() { + return failOnWarning; + } + + public Boolean 
getFillBlockCache() { + return fillBlockCache; + } + + public String getForceOneShardAttributeValue() { + return forceOneShardAttributeValue; + } + + public Boolean getFullCount() { + return fullCount; + } + + public Long getIntermediateCommitCount() { + return intermediateCommitCount; + } + + public Long getIntermediateCommitSize() { + return intermediateCommitSize; + } + + public Integer getMaxDNFConditionMembers() { + return maxDNFConditionMembers; + } + + public Integer getMaxNodesPerCallstack() { + return maxNodesPerCallstack; + } + + public Integer getMaxNumberOfPlans() { + return maxNumberOfPlans; + } + + /** + * @deprecated for removal, use {@link Options#getMaxNumberOfPlans()} instead + */ + @Deprecated + @JsonIgnore + public Integer getMaxPlans() { + return getMaxNumberOfPlans(); + } + + public Double getMaxRuntime() { + return maxRuntime; + } + + public Long getMaxTransactionSize() { + return maxTransactionSize; + } + + public Long getMaxWarningCount() { + return maxWarningCount; + } + + public Optimizer getOptimizer() { + if (optimizer == null) { + optimizer = new Optimizer(); + } + return optimizer; + } + + public Boolean getProfile() { + return profile; + } + + public Double getSatelliteSyncWait() { + return satelliteSyncWait; + } + + public Collection getShardIds() { + return shardIds; + } + + public Boolean getSkipInaccessibleCollections() { + return skipInaccessibleCollections; + } + + public Long getSpillOverThresholdMemoryUsage() { + return spillOverThresholdMemoryUsage; + } + + public Long getSpillOverThresholdNumRows() { + return spillOverThresholdNumRows; + } + + public Boolean getStream() { + return stream; + } + + public Boolean getUsePlanCache() { + return usePlanCache; + } + + public void setAllPlans(Boolean allPlans) { + this.allPlans = allPlans; + } + + public void setAllowDirtyReads(Boolean allowDirtyReads) { + this.allowDirtyReads = allowDirtyReads; + } + + public void setAllowRetry(Boolean allowRetry) { + this.allowRetry = allowRetry; 
+ } + + public void setFailOnWarning(Boolean failOnWarning) { + this.failOnWarning = failOnWarning; + } + + public void setFillBlockCache(Boolean fillBlockCache) { + this.fillBlockCache = fillBlockCache; + } + + public void setForceOneShardAttributeValue(String forceOneShardAttributeValue) { + this.forceOneShardAttributeValue = forceOneShardAttributeValue; + } + + public void setFullCount(Boolean fullCount) { + this.fullCount = fullCount; + } + + public void setIntermediateCommitCount(Long intermediateCommitCount) { + this.intermediateCommitCount = intermediateCommitCount; + } + + public void setIntermediateCommitSize(Long intermediateCommitSize) { + this.intermediateCommitSize = intermediateCommitSize; + } + + public void setMaxDNFConditionMembers(Integer maxDNFConditionMembers) { + this.maxDNFConditionMembers = maxDNFConditionMembers; + } + + public void setMaxNodesPerCallstack(Integer maxNodesPerCallstack) { + this.maxNodesPerCallstack = maxNodesPerCallstack; + } + + public void setMaxNumberOfPlans(Integer maxNumberOfPlans) { + this.maxNumberOfPlans = maxNumberOfPlans; + } + + public void setMaxRuntime(Double maxRuntime) { + this.maxRuntime = maxRuntime; + } + + public void setMaxTransactionSize(Long maxTransactionSize) { + this.maxTransactionSize = maxTransactionSize; + } + + public void setMaxWarningCount(Long maxWarningCount) { + this.maxWarningCount = maxWarningCount; + } + + public void setOptimizer(Optimizer optimizer) { + this.optimizer = optimizer; + } + + public void setProfile(Boolean profile) { + this.profile = profile; + } + + public void setSatelliteSyncWait(Double satelliteSyncWait) { + this.satelliteSyncWait = satelliteSyncWait; + } + + public void setShardIds(Collection shardIds) { + this.shardIds = shardIds; + } + + public void setSkipInaccessibleCollections(Boolean skipInaccessibleCollections) { + this.skipInaccessibleCollections = skipInaccessibleCollections; + } + + public void setSpillOverThresholdMemoryUsage(Long 
spillOverThresholdMemoryUsage) { + this.spillOverThresholdMemoryUsage = spillOverThresholdMemoryUsage; + } + + public void setSpillOverThresholdNumRows(Long spillOverThresholdNumRows) { + this.spillOverThresholdNumRows = spillOverThresholdNumRows; + } + + public void setStream(Boolean stream) { + this.stream = stream; + } + + public void setUsePlanCache(Boolean usePlanCache) { + this.usePlanCache = usePlanCache; + } + + @Override + public Options clone() { + try { + Options clone = (Options) super.clone(); + clone.customOptions = customOptions != null ? new HashMap<>(customOptions) : null; + clone.optimizer = optimizer != null ? optimizer.clone() : null; + clone.shardIds = shardIds != null ? new ArrayList<>(shardIds) : null; + return clone; + } catch (CloneNotSupportedException e) { + throw new AssertionError(); + } + } + } + + public static final class Optimizer implements Cloneable { + private Collection rules; + + public Collection getRules() { + return rules; + } + + public void setRules(Collection rules) { + this.rules = rules; + } + + @Override + public Optimizer clone() { + try { + Optimizer clone = (Optimizer) super.clone(); + clone.rules = rules != null ? new ArrayList<>(rules) : null; + return clone; + } catch (CloneNotSupportedException e) { + throw new AssertionError(); + } + } + } + + // ------------------------------------ + // --- accessors for nested options --- + // ------------------------------------ + + @JsonIgnore + public Map getCustomOptions() { + return getOptions().getCustomOptions(); + } + + /** + * Set an additional custom option in the form of key-value pair. 
+ * + * @param key option name + * @param value option value + * @return this + */ + public AqlQueryOptions customOption(String key, Object value) { + getOptions().setCustomOption(key, value); + return this; + } + + @JsonIgnore + public Boolean getAllowDirtyReads() { + return getOptions().getAllowDirtyReads(); + } + + /** + * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then + * the Coordinator is allowed to read from any shard replica and not only from the leader. + * You may observe data inconsistencies (dirty reads) when reading from followers, namely + * obsolete revisions of documents because changes have not yet been replicated to the + * follower, as well as changes to documents before they are officially committed on the + * leader. This feature is only available in the Enterprise Edition. + * @return this + */ + public AqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) { + getOptions().setAllowDirtyReads(allowDirtyReads); + return this; + } + + @JsonIgnore + public Boolean getAllowRetry() { + return getOptions().getAllowRetry(); + } + + /** + * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor. + *

+ * This makes it possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in + * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} + * with cause {@link java.io.IOException}) + *

+ * If set to false (default), then it is not safe to retry invoking + * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to + * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the + * server). + *

+ * Note: once you have successfully received the last batch, you should call + * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessarily keep the + * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}). + * @return this + * @since ArangoDB 3.11 + */ + public AqlQueryOptions allowRetry(final Boolean allowRetry) { + getOptions().setAllowRetry(allowRetry); + return this; + } + + @JsonIgnore + public Boolean getFailOnWarning() { + return getOptions().getFailOnWarning(); + } + + /** + * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a + * warning. This option should be used during development to catch potential issues early. + * When the attribute is set to false, warnings will not be propagated to exceptions and will + * be returned with the query result. There is also a server configuration option + * --query.fail-on-warning for setting the default value for failOnWarning so it does not + * need to be set on a per-query level. + * @return this + */ + public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { + getOptions().setFailOnWarning(failOnWarning); + return this; + } + + @JsonIgnore + public Boolean getFillBlockCache() { + return getOptions().getFillBlockCache(); + } + + /** + * @param fillBlockCache if set to true or not specified, this will make the query store + * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is + * usually the desired behavior. The option can be set to false for queries that + * are known to either read a lot of data that would thrash the block cache, or for queries + * that read data known to be outside of the hot set. By setting the option + * to false, data read by the query will not make it into the RocksDB block + * cache if it is not already in there, thus leaving more room for the actual hot set.
+ * @return this + * @since ArangoDB 3.8.1 + */ + public AqlQueryOptions fillBlockCache(final Boolean fillBlockCache) { + getOptions().setFillBlockCache(fillBlockCache); + return this; + } + + @JsonIgnore + public String getForceOneShardAttributeValue() { + return getOptions().getForceOneShardAttributeValue(); + } + + /** + * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer + * cannot automatically detect that the query can be limited to only a single + * server (e.g. in a disjoint smart graph case). + *

+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the + * query may be shipped to a wrong DB server and may not return results (i.e. + * empty result set). + *

+ * Use at your own risk. + * @return this + */ + public AqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) { + getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue); + return this; + } + + @JsonIgnore + public Boolean getFullCount() { + return getOptions().getFullCount(); + } + + /** + * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra + * attribute + * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } + * } }. The + * fullCount attribute will contain the number of documents in the result before the last LIMIT + * in the + * query was applied. It can be used to count the number of documents that match certain filter + * criteria, + * but only return a subset of them, in one go. It is thus similar to MySQL's + * SQL_CALC_FOUND_ROWS hint. + * Note that setting the option will disable a few LIMIT optimizations and may lead to more + * documents + * being processed, and thus make queries run longer. Note that the fullCount attribute will + * only be + * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used + * in the + * query. + * @return this + */ + public AqlQueryOptions fullCount(final Boolean fullCount) { + getOptions().setFullCount(fullCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitCount() { + return getOptions().getIntermediateCommitCount(); + } + + /** + * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed + * automatically. Honored by + * the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { + getOptions().setIntermediateCommitCount(intermediateCommitCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitSize() { + return getOptions().getIntermediateCommitSize(); + } + + /** + * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed + * automatically. + * Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { + getOptions().setIntermediateCommitSize(intermediateCommitSize); + return this; + } + + @JsonIgnore + public Integer getMaxDNFConditionMembers() { + return getOptions().getMaxDNFConditionMembers(); + } + + /** + * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation + * of an AQL FILTER condition. + *

+ * You can use this option to limit the computation time and memory usage when + * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal + * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can + * take a large amount of processing time and memory. This query option limits the + * computation time and memory usage for such conditions. + *

+ * Once the threshold value is reached during the DNF conversion of a FILTER + * condition, the conversion is aborted, and the query continues with a simplified + * internal representation of the condition, which cannot be used for index lookups. + *

+ * You can set the threshold globally instead of per query with the + * --query.max-dnf-condition-members startup option. + * @return this + */ + public AqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) { + getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers); + return this; + } + + @JsonIgnore + public Integer getMaxNodesPerCallstack() { + return getOptions().getMaxNodesPerCallstack(); + } + + /** + * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is + * performed to avoid a potential stack overflow. Defaults to the configured value of + * the startup option --query.max-nodes-per-callstack. + *

+ * This option is only useful for testing and debugging and normally does not need any + * adjustment. + * @return this + */ + public AqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) { + getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack); + return this; + } + + @JsonIgnore + public Integer getMaxNumberOfPlans() { + return getOptions().getMaxNumberOfPlans(); + } + + /** + * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + */ + public AqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { + getOptions().setMaxNumberOfPlans(maxNumberOfPlans); + return this; + } + + /** + * @deprecated for removal, use {@link AqlQueryOptions#getMaxNumberOfPlans()} instead + */ + @Deprecated + @JsonIgnore + public Integer getMaxPlans() { + return getMaxNumberOfPlans(); + } + + /** + * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + * @deprecated for removal, use {@link AqlQueryOptions#maxNumberOfPlans(Integer)} instead + */ + @Deprecated + public AqlQueryOptions maxPlans(final Integer maxPlans) { + return maxNumberOfPlans(maxPlans); + } + + @JsonIgnore + public Double getMaxRuntime() { + return getOptions().getMaxRuntime(); + } + + /** + * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified + * in seconds. The default value is 0.0 (no timeout). + * @return this + */ + public AqlQueryOptions maxRuntime(final Double maxRuntime) { + getOptions().setMaxRuntime(maxRuntime); + return this; + } + + @JsonIgnore + public Long getMaxTransactionSize() { + return getOptions().getMaxTransactionSize(); + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { + getOptions().setMaxTransactionSize(maxTransactionSize); + return this; + } + + @JsonIgnore + public Long getMaxWarningCount() { + return getOptions().getMaxWarningCount(); + } + + /** + * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a + * query will return + * is limited to 10 by default, but that number can be increased or decreased by setting + * this attribute. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions maxWarningCount(final Long maxWarningCount) { + getOptions().setMaxWarningCount(maxWarningCount); + return this; + } + + @JsonIgnore + public Optimizer getOptimizer() { + return getOptions().getOptimizer(); + } + + /** + * @param optimizer Options related to the query optimizer. + * @return this + */ + public AqlQueryOptions optimizer(final Optimizer optimizer) { + getOptions().setOptimizer(optimizer); + return this; + } + + @JsonIgnore + public Boolean getProfile() { + return getOptions().getProfile(); + } + + /** + * @param profile If set to true, then the additional query profiling information will be returned in the + * sub-attribute + * profile of the extra return attribute if the query result is not served from the query cache. + * @return this + */ + public AqlQueryOptions profile(final Boolean profile) { + getOptions().setProfile(profile); + return this; + } + + @JsonIgnore + public Double getSatelliteSyncWait() { + return getOptions().getSatelliteSyncWait(); + } + + /** + * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to + * bring the + * satellite collections involved in the query into sync. The default value is 60.0 + * (seconds). When the + * max time has been reached the query will be stopped. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { + getOptions().setSatelliteSyncWait(satelliteSyncWait); + return this; + } + + @JsonIgnore + public Collection getShardIds() { + return getOptions().getShardIds(); + } + + /** + * Restrict query to shards by given ids. This is an internal option. Use at your own risk. + * + * @param shardIds + * @return this + */ + public AqlQueryOptions shardIds(final String... shardIds) { + getOptions().setShardIds(Arrays.asList(shardIds)); + return this; + } + + @JsonIgnore + public Boolean getSkipInaccessibleCollections() { + return getOptions().getSkipInaccessibleCollections(); + } + + /** + * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a + * user has no access rights + * as if these collections were empty. Instead of returning a forbidden access + * error, your queries will + * execute normally. This is intended to help with certain use-cases: A graph + * contains several + * collections and different users execute AQL queries on that graph. You can + * now naturally limit the + * accessible results by changing the access rights of users on collections. + * This feature is only + * available in the Enterprise Edition. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { + getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdMemoryUsage() { + return getOptions().getSpillOverThresholdMemoryUsage(); + } + + /** + * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results + * temporarily on disk if the amount of memory used (in bytes) exceeds the + * specified value. This is used for decreasing the memory usage during the + * query execution. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path + * for the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 128MB. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned + * off by default. The query results are still built up entirely in RAM on + * Coordinators and single servers for non-streaming queries. To avoid the + * buildup of the entire query result in RAM, use a streaming query (see the + * stream option). + * @return this + */ + public AqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) { + getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdNumRows() { + return getOptions().getSpillOverThresholdNumRows(); + } + + /** + * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily + * on disk if the number of rows produced by the query exceeds the specified value. + * This is used for decreasing the memory usage during the query execution. In a + * query that iterates over a collection that contains documents, each row is a + * document, and in a query that iterates over temporary values + * (i.e. FOR i IN 1..100), each row is one of such temporary values. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path for + * the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 5000000 rows. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned off + * by default. The query results are still built up entirely in RAM on Coordinators + * and single servers for non-streaming queries. To avoid the buildup of the entire + * query result in RAM, use a streaming query (see the stream option). + * @return this + */ + public AqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) { + getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows); + return this; + } + + @JsonIgnore + public Boolean getStream() { + return getOptions().getStream(); + } + + @JsonIgnore + public Boolean getUsePlanCache() { + return getOptions().getUsePlanCache(); + } + + /** + * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not + * stored on + * the server, but calculated on the fly. Beware: long-running queries will need to hold the + * collection + * locks for as long as the query cursor exists. When set to false a query will be executed right + * away in + * its entirety. In that case query results are either returned right away (if the resultset is small + * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the + * ttl). It is advisable to only use this option on short-running queries or without exclusive locks + * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not + * work + * on streaming queries. Additionally query statistics, warnings and profiling data will only be + * available after the query is finished. The default value is false + * @return this + * @since ArangoDB 3.4.0 + */ + public AqlQueryOptions stream(final Boolean stream) { + getOptions().setStream(stream); + return this; + } + + /** + * @param usePlanCache Set this option to true to utilize a cached query plan or add the execution plan of this + * query to the cache if it’s not in the cache yet. 
Otherwise, the plan cache is bypassed + * (introduced in v3.12.4). + * Query plan caching can reduce the total time for processing queries by avoiding to parse, + * plan, and optimize queries over and over again that effectively have the same execution plan + * with at most some changes to bind parameter values. + * @return this + */ + public AqlQueryOptions usePlanCache(final Boolean usePlanCache) { + getOptions().setUsePlanCache(usePlanCache); + return this; + } + + @JsonIgnore + public Collection getRules() { + return getOptions().getOptimizer().getRules(); + } + + /** + * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, + * telling the + * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to + * enable + * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules + * @return this + */ + public AqlQueryOptions rules(final Collection rules) { + getOptions().getOptimizer().setRules(rules); + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/DBCreateOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryParseOptions.java similarity index 66% rename from src/main/java/com/arangodb/model/DBCreateOptions.java rename to core/src/main/java/com/arangodb/model/AqlQueryParseOptions.java index 2192ecfff..d26c44973 100644 --- a/src/main/java/com/arangodb/model/DBCreateOptions.java +++ b/core/src/main/java/com/arangodb/model/AqlQueryParseOptions.java @@ -1,49 +1,47 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - */ -public class DBCreateOptions { - - private String name; - - public DBCreateOptions() { - super(); - } - - public String getName() { - return name; - } - - /** - * @param name - * Has to contain a valid database name - * @return options - */ - protected DBCreateOptions name(final String name) { - this.name = name; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class AqlQueryParseOptions { + + private String query; + + public AqlQueryParseOptions() { + super(); + } + + public String getQuery() { + return query; + } + + /** + * @param query the query which you want to parse + * @return options + */ + AqlQueryParseOptions query(final String query) { + this.query = query; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/CollectionCountOptions.java b/core/src/main/java/com/arangodb/model/CollectionCountOptions.java new file mode 100644 index 000000000..0c3d2f6f6 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionCountOptions.java @@ -0,0 +1,33 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Michele Rastelli + */ +public final class CollectionCountOptions extends TransactionalOptions { + + @Override + CollectionCountOptions getThis() { + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/CollectionCreateOptions.java b/core/src/main/java/com/arangodb/model/CollectionCreateOptions.java new file mode 100644 index 000000000..143765c09 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionCreateOptions.java @@ -0,0 +1,301 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.*; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class CollectionCreateOptions { + + private String name; + private ReplicationFactor replicationFactor; + private Integer writeConcern; + private KeyOptions keyOptions; + private Boolean waitForSync; + private final List computedValues = new ArrayList<>(); + private Collection shardKeys; + private Integer numberOfShards; + private Boolean isSystem; + private CollectionType type; + private String distributeShardsLike; + + private String shardingStrategy; // cluster option + private String smartJoinAttribute; // enterprise option + + private CollectionSchema schema; + + public CollectionCreateOptions() { + super(); + } + + public String getName() { + return name; + } + + /** + * @param name The name of the collection + * @return options + */ + CollectionCreateOptions name(final String name) { + this.name = name; + return this; + } + + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + /** + * @param replicationFactor (The default is 1): in a cluster, this attribute determines how many copies of each + * shard are kept on + * different DBServers. The value 1 means that only one copy (no synchronous + * replication) is kept. A + * value of k means that k-1 replicas are kept. Any two copies reside on different + * DBServers. Replication + * between them is synchronous, that is, every write operation to the "leader" copy will + * be replicated to + * all "follower" replicas, before the write operation is reported successful. If a + * server fails, this is + * detected automatically and one of the servers holding copies take over, usually + * without an error being + * reported. 
+ * @return options + */ + public CollectionCreateOptions replicationFactor(final ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + return this; + } + + public CollectionCreateOptions replicationFactor(int replicationFactor) { + this.replicationFactor = ReplicationFactor.of(replicationFactor); + return this; + } + + public Integer getWriteConcern() { + return writeConcern; + } + + /** + * @param writeConcern write concern for this collection (default: 1). + * It determines how many copies of each shard are required to be in sync on the different + * DB-Servers. If there are less then these many copies in the cluster a shard will refuse to + * write. Writes to shards with enough up-to-date copies will succeed at the same time however. + * The value of writeConcern can not be larger than replicationFactor. (cluster only) + * @return options + */ + public CollectionCreateOptions writeConcern(final Integer writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + public KeyOptions getKeyOptions() { + return keyOptions; + } + + /** + * @param allowUserKeys if set to true, then it is allowed to supply own key values in the _key attribute of a + * document. If + * set to false, then the key generator will solely be responsible for generating keys and + * supplying own + * key values in the _key attribute of documents is considered an error. + * @param type specifies the type of the key generator. The currently available generators are + * traditional and + * autoincrement. + * @param increment increment value for autoincrement key generator. Not used for other key generator types. + * @param offset Initial offset value for autoincrement key generator. Not used for other key generator + * types. 
+ * @return options + */ + public CollectionCreateOptions keyOptions( + final Boolean allowUserKeys, + final KeyType type, + final Integer increment, + final Integer offset) { + this.keyOptions = new KeyOptions(allowUserKeys, type, increment, offset); + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync If true then the data is synchronized to disk before returning from a document create, + * update, replace + * or removal operation. (default: false) + * @return options + */ + public CollectionCreateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Collection getShardKeys() { + return shardKeys; + } + + /** + * @param shardKeys (The default is [ "_key" ]): in a cluster, this attribute determines which document + * attributes are + * used to determine the target shard for documents. Documents are sent to shards based on the + * values of + * their shard key attributes. The values of all shard key attributes in a document are hashed, + * and the + * hash value is used to determine the target shard. Note: Values of shard key attributes cannot be + * changed once set. This option is meaningless in a single server setup. + * @return options + */ + public CollectionCreateOptions shardKeys(final String... 
shardKeys) { + this.shardKeys = Arrays.asList(shardKeys); + return this; + } + + /** + * @param smartJoinAttribute + * @return options + */ + public CollectionCreateOptions smartJoinAttribute(final String smartJoinAttribute) { + this.smartJoinAttribute = smartJoinAttribute; + return this; + } + + public String getSmartJoinAttribute() { + return smartJoinAttribute; + } + + /** + * @param shardingStrategy + * @return options + */ + public CollectionCreateOptions shardingStrategy(final String shardingStrategy) { + this.shardingStrategy = shardingStrategy; + return this; + } + + public String getShardingStrategy() { + return shardingStrategy; + } + + /** + * @param numberOfShards (The default is 1): in a cluster, this value determines the number of shards to create + * for the + * collection. In a single server setup, this option is meaningless. + * @return options + */ + public CollectionCreateOptions numberOfShards(final Integer numberOfShards) { + this.numberOfShards = numberOfShards; + return this; + } + + public Integer getNumberOfShards() { + return numberOfShards; + } + + public Boolean getIsSystem() { + return isSystem; + } + + /** + * @param isSystem If true, create a system collection. In this case collection-name should start with an + * underscore. End + * users should normally create non-system collections only. API implementors may be required to + * create + * system collections in very special occasions, but normally a regular collection will do. (The + * default + * is false) + * @return options + */ + public CollectionCreateOptions isSystem(final Boolean isSystem) { + this.isSystem = isSystem; + return this; + } + + public CollectionType getType() { + return type; + } + + /** + * @param type (The default is {@link CollectionType#DOCUMENT}): the type of the collection to create. 
+ * @return options + */ + public CollectionCreateOptions type(final CollectionType type) { + this.type = type; + return this; + } + + public String getDistributeShardsLike() { + return distributeShardsLike; + } + + /** + * @param distributeShardsLike (The default is ""): in an enterprise cluster, this attribute binds the specifics + * of sharding for the + * newly created collection to follow that of a specified existing collection. Note: + * Using this parameter + * has consequences for the prototype collection. It can no longer be dropped, before + * sharding imitating + * collections are dropped. Equally, backups and restores of imitating collections + * alone will generate + * warnings, which can be overridden, about missing sharding prototype. + * @return options + */ + public CollectionCreateOptions distributeShardsLike(final String distributeShardsLike) { + this.distributeShardsLike = distributeShardsLike; + return this; + } + + public CollectionSchema getSchema() { + return schema; + } + + /** + * @param schema object that specifies the collection level schema for documents + * @return options + * @since ArangoDB 3.7 + */ + public CollectionCreateOptions schema(final CollectionSchema schema) { + this.schema = schema; + return this; + } + + /** + * @param computedValues An optional list of computed values. + * @return options + * @since ArangoDB 3.10 + */ + public CollectionCreateOptions computedValues(final ComputedValue... 
computedValues) { + Collections.addAll(this.computedValues, computedValues); + return this; + } + + public List getComputedValues() { + return computedValues; + } +} diff --git a/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java b/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java new file mode 100644 index 000000000..691b4344d --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java @@ -0,0 +1,155 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.ReplicationFactor; +import com.fasterxml.jackson.annotation.JsonInclude; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * @author Mark Vollmary + */ +public final class CollectionPropertiesOptions { + + private Boolean cacheEnabled; + private List computedValues; + private ReplicationFactor replicationFactor; + private CollectionSchema schema; + private Boolean waitForSync; + private Integer writeConcern; + + public CollectionPropertiesOptions() { + super(); + } + + public Boolean getCacheEnabled() { + return cacheEnabled; + } + + /** + * @param cacheEnabled Whether the in-memory hash cache for documents should be enabled for this collection. 
Can be + * controlled globally with the --cache.size startup option. The cache can speed up repeated + * reads of the same documents via their document keys. If the same documents are not fetched + * often or are modified frequently, then you may disable the cache to avoid the maintenance + * costs. + * @return this + */ + public CollectionPropertiesOptions cacheEnabled(final Boolean cacheEnabled) { + this.cacheEnabled = cacheEnabled; + return this; + } + + public List getComputedValues() { + return computedValues; + } + + /** + * @param computedValues An optional list of computed values. + * @return this + * @since ArangoDB 3.10 + */ + public CollectionPropertiesOptions computedValues(final ComputedValue... computedValues) { + if (this.computedValues == null) { + this.computedValues = new ArrayList<>(); + } + Collections.addAll(this.computedValues, computedValues); + return this; + } + + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + /** + * @param replicationFactor In a cluster, this attribute determines how many copies of each shard are kept on + * different DB-Servers. The value 1 means that only one copy (no synchronous replication) + * is kept. A value of k means that k-1 replicas are kept. For SatelliteCollections, it + * needs to be the string "satellite", which matches the replication factor to the number + * of DB-Servers (Enterprise Edition only). + *

+ * Any two copies reside on different DB-Servers. Replication between them is synchronous, + * that is, every write operation to the β€œleader” copy will be replicated to all β€œfollower” + * replicas, before the write operation is reported successful. + *

+ * If a server fails, this is detected automatically and one of the servers holding copies + * take over, usually without an error being reported. + * @return this + */ + public CollectionPropertiesOptions replicationFactor(final ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + return this; + } + + @JsonInclude(JsonInclude.Include.ALWAYS) + public CollectionSchema getSchema() { + return schema; + } + + /** + * @param schema object that specifies the collection level schema for documents + * @return this + * @since ArangoDB 3.7 + */ + public CollectionPropertiesOptions schema(final CollectionSchema schema) { + this.schema = schema; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync If true then creating or changing a document will wait until the data has been synchronized + * to disk. + * @return this + */ + public CollectionPropertiesOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Integer getWriteConcern() { + return writeConcern; + } + + /** + * @param writeConcern Determines how many copies of each shard are required to be in sync on the different + * DB-Servers. If there are less than these many copies in the cluster, a shard refuses to + * write. Writes to shards with enough up-to-date copies succeed at the same time, however. + * The value of writeConcern cannot be greater than replicationFactor. + *

+ * If distributeShardsLike is set, the default writeConcern is that of the prototype collection. + * For SatelliteCollections, the writeConcern is automatically controlled to equal the number of + * DB-Servers and has a value of 0. Otherwise, the default value is controlled by the current + * database’s default writeConcern, which uses the --cluster.write-concern startup option as + * default, which defaults to 1. (cluster only) + * @return this + */ + public CollectionPropertiesOptions writeConcern(final Integer writeConcern) { + this.writeConcern = writeConcern; + return this; + } + +} diff --git a/src/main/java/com/arangodb/model/CollectionRenameOptions.java b/core/src/main/java/com/arangodb/model/CollectionRenameOptions.java similarity index 66% rename from src/main/java/com/arangodb/model/CollectionRenameOptions.java rename to core/src/main/java/com/arangodb/model/CollectionRenameOptions.java index 96fc50080..6748f811f 100644 --- a/src/main/java/com/arangodb/model/CollectionRenameOptions.java +++ b/core/src/main/java/com/arangodb/model/CollectionRenameOptions.java @@ -1,49 +1,47 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - */ -public class CollectionRenameOptions { - - private String name; - - public CollectionRenameOptions() { - super(); - } - - public String getName() { - return name; - } - - /** - * @param name - * The new name - * @return options - */ - protected CollectionRenameOptions name(final String name) { - this.name = name; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class CollectionRenameOptions { + + private String name; + + public CollectionRenameOptions() { + super(); + } + + public String getName() { + return name; + } + + /** + * @param name The new name + * @return options + */ + CollectionRenameOptions name(final String name) { + this.name = name; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/CollectionSchema.java b/core/src/main/java/com/arangodb/model/CollectionSchema.java new file mode 100644 index 000000000..b6665c117 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionSchema.java @@ -0,0 +1,128 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.model; + + +import com.arangodb.internal.serde.InternalDeserializers; +import com.arangodb.internal.serde.InternalSerializers; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.7 + */ +public final class CollectionSchema { + + private String rule; + private Level level; + private String message; + + /** + * @return JSON Schema description + */ + @JsonSerialize(using = InternalSerializers.CollectionSchemaRuleSerializer.class) + public String getRule() { + return rule; + } + + @JsonDeserialize(using = InternalDeserializers.CollectionSchemaRuleDeserializer.class) + public CollectionSchema setRule(String rule) { + this.rule = rule; + return this; + } + + /** + * @return controls when the validation will be applied + */ + public Level getLevel() { + return level; + } + + public CollectionSchema setLevel(Level level) { + this.level = level; + return this; + } + + /** + * @return the message that will be used when validation fails + */ + public String getMessage() { + return message; + } + + public CollectionSchema setMessage(String message) { + this.message = message; + return this; + } + + public enum Level { + + /** + * The rule is inactive and validation thus turned off. + */ + @JsonProperty("none") + NONE("none"), + + /** + * Only newly inserted documents are validated. + */ + @JsonProperty("new") + NEW("new"), + + /** + * New and modified documents must pass validation, except for modified documents where the OLD value did not + * pass validation already. This level is useful if you have documents which do not match your target structure, + * but you want to stop the insertion of more invalid documents and prohibit that valid documents are changed to + * invalid documents. 
+ */ + @JsonProperty("moderate") + MODERATE("moderate"), + + /** + * All new and modified document must strictly pass validation. No exceptions are made (default). + */ + @JsonProperty("strict") + STRICT("strict"); + + private final String value; + + Level(String value) { + this.value = value; + } + + public static Level of(String label) { + for (Level e : values()) { + if (e.value.equals(label)) { + return e; + } + } + return null; + } + + public String getValue() { + return value; + } + } + +} diff --git a/core/src/main/java/com/arangodb/model/CollectionTruncateOptions.java b/core/src/main/java/com/arangodb/model/CollectionTruncateOptions.java new file mode 100644 index 000000000..f95012ac1 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionTruncateOptions.java @@ -0,0 +1,33 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Michele Rastelli + */ +public final class CollectionTruncateOptions extends TransactionalOptions { + + @Override + CollectionTruncateOptions getThis() { + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/CollectionsReadOptions.java b/core/src/main/java/com/arangodb/model/CollectionsReadOptions.java new file mode 100644 index 000000000..f92356781 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/CollectionsReadOptions.java @@ -0,0 +1,47 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class CollectionsReadOptions { + + private Boolean excludeSystem; + + public CollectionsReadOptions() { + super(); + } + + public Boolean getExcludeSystem() { + return excludeSystem; + } + + /** + * @param excludeSystem Whether or not system collections should be excluded from the result. 
+ * @return options + */ + public CollectionsReadOptions excludeSystem(final Boolean excludeSystem) { + this.excludeSystem = excludeSystem; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/ComputedValue.java b/core/src/main/java/com/arangodb/model/ComputedValue.java new file mode 100644 index 000000000..54d6a2b9f --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ComputedValue.java @@ -0,0 +1,128 @@ +package com.arangodb.model; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +/** + * @since ArangoDB 3.10 + */ +public final class ComputedValue { + private String name; + private String expression; + private Boolean overwrite; + private Set computeOn; + private Boolean keepNull; + private Boolean failOnWarning; + + public enum ComputeOn { + insert, update, replace + } + + public ComputedValue() { + super(); + } + + /** + * @param name (required) The name of the target attribute. Can only be a top-level attribute, but you may return + * a nested object. Cannot be _key, _id, _rev, _from, _to, or a shard key attribute. + * @return this + */ + public ComputedValue name(final String name) { + this.name = name; + return this; + } + + /** + * @param expression (required) An AQL RETURN operation with an expression that computes the desired value. See + * Computed Value Expressions + * for details. + * @return this + */ + public ComputedValue expression(final String expression) { + this.expression = expression; + return this; + } + + /** + * @param overwrite (required) Whether the computed value shall take precedence over a user-provided or existing + * attribute. + * @return this + */ + public ComputedValue overwrite(final Boolean overwrite) { + this.overwrite = overwrite; + return this; + } + + /** + * @param computeOn (optional) An array of operations to define on which write operations the value shall be + * computed. The default is ["insert", "update", "replace"]. 
+ * @return this + */ + public ComputedValue computeOn(final ComputeOn... computeOn) { + if (this.computeOn == null) { + this.computeOn = new HashSet<>(); + } + Collections.addAll(this.computeOn, computeOn); + return this; + } + + /** + * @param keepNull (optional) Whether the target attribute shall be set if the expression evaluates to null. You + * can set the option to false to not set (or unset) the target attribute if the expression + * returns null. The default is true. + * @return this + */ + public ComputedValue keepNull(final Boolean keepNull) { + this.keepNull = keepNull; + return this; + } + + /** + * @param failOnWarning (optional) Whether to let the write operation fail if the expression produces a warning. + * The default is false. + * @return this + */ + public ComputedValue failOnWarning(final Boolean failOnWarning) { + this.failOnWarning = failOnWarning; + return this; + } + + public String getName() { + return name; + } + + public String getExpression() { + return expression; + } + + public Boolean getOverwrite() { + return overwrite; + } + + public Set getComputeOn() { + return computeOn; + } + + public Boolean getKeepNull() { + return keepNull; + } + + public Boolean getFailOnWarning() { + return failOnWarning; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ComputedValue that = (ComputedValue) o; + return Objects.equals(name, that.name) && Objects.equals(expression, that.expression) && Objects.equals(overwrite, that.overwrite) && Objects.equals(computeOn, that.computeOn) && Objects.equals(keepNull, that.keepNull) && Objects.equals(failOnWarning, that.failOnWarning); + } + + @Override + public int hashCode() { + return Objects.hash(name, expression, overwrite, computeOn, keepNull, failOnWarning); + } +} diff --git a/core/src/main/java/com/arangodb/model/DBCreateOptions.java b/core/src/main/java/com/arangodb/model/DBCreateOptions.java new file mode 
100644 index 000000000..72115be1a --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DBCreateOptions.java @@ -0,0 +1,77 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class DBCreateOptions { + + private Collection users; + private String name; + private DatabaseOptions options; + + public DBCreateOptions() { + super(); + } + + public Collection getUsers() { + return users; + } + + /** + * @param users array of user objects to initially create for the new database. + * User information will not be changed for users that already exist. + * If users is not specified or does not contain any users, a default user + * root will be created with an empty string password. This ensures that the + * new database will be accessible after it is created. 
+ * @return options + */ + public DBCreateOptions users(final Collection users) { + this.users = users; + return this; + } + + public String getName() { + return name; + } + + /** + * @param name database name + * @return options + */ + public DBCreateOptions name(final String name) { + this.name = name; + return this; + } + + public DatabaseOptions getOptions() { + return options; + } + + public DBCreateOptions options(DatabaseOptions options) { + this.options = options; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DatabaseOptions.java b/core/src/main/java/com/arangodb/model/DatabaseOptions.java new file mode 100644 index 000000000..62bbaa7cd --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DatabaseOptions.java @@ -0,0 +1,91 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.ReplicationFactor; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.6.0 + */ +public final class DatabaseOptions { + + private ReplicationFactor replicationFactor; + private Integer writeConcern; + private String sharding; + + public DatabaseOptions() { + super(); + } + + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + public Integer getWriteConcern() { + return writeConcern; + } + + public String getSharding() { + return sharding; + } + + /** + * @param replicationFactor the default replication factor for collections in this database + * @return options + * @since ArangoDB 3.6.0 + */ + public DatabaseOptions replicationFactor(final ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + return this; + } + + public DatabaseOptions replicationFactor(int replicationFactor) { + this.replicationFactor = ReplicationFactor.of(replicationFactor); + return this; + } + + /** + * Default write concern for new collections created in this database. It determines how many copies of each shard + * are required to be in sync on the different DBServers. If there are less then these many copies in the cluster a + * shard will refuse to write. Writes to shards with enough up-to-date copies will succeed at the same time however. + * The value of writeConcern can not be larger than replicationFactor. (cluster only) + * + * @return options + * @since ArangoDB 3.6.0 + */ + public DatabaseOptions writeConcern(final Integer writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + /** + * @param sharding The sharding method to use for new collections in this database. + * Valid values are: β€œβ€, β€œflexible”, or β€œsingle”. The first two are equivalent. 
+ * @return options + * @since ArangoDB 3.6.0 + */ + public DatabaseOptions sharding(String sharding) { + this.sharding = sharding; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DatabaseUsersOptions.java b/core/src/main/java/com/arangodb/model/DatabaseUsersOptions.java new file mode 100644 index 000000000..63d11e6c8 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DatabaseUsersOptions.java @@ -0,0 +1,90 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Map; + +/** + * @author Michele Rastelli + */ +public final class DatabaseUsersOptions { + + private String username; + private Map extra; + private String passwd; + private Boolean active; + + public String getUsername() { + return username; + } + + /** + * @param username Login name of the user to be created + * @return options + */ + public DatabaseUsersOptions username(final String username) { + this.username = username; + return this; + } + + public Map getExtra() { + return extra; + } + + /** + * @param extra extra user information. The data contained in extra + * will be stored for the user but not be interpreted further by ArangoDB. 
+ * @return options + */ + public DatabaseUsersOptions extra(final Map extra) { + this.extra = extra; + return this; + } + + public String getPasswd() { + return passwd; + } + + /** + * @param passwd The user password as a string. If not specified, it will default to an empty string. + * @return options + */ + public DatabaseUsersOptions passwd(final String passwd) { + this.passwd = passwd; + return this; + } + + public Boolean getActive() { + return active; + } + + /** + * @param active A flag indicating whether the user account should be activated or not. + * The default value is true. If set to false, the user won't be able to + * log into the database. + * @return options + */ + public DatabaseUsersOptions active(final Boolean active) { + this.active = active; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentCreateOptions.java b/core/src/main/java/com/arangodb/model/DocumentCreateOptions.java new file mode 100644 index 000000000..31217673b --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentCreateOptions.java @@ -0,0 +1,195 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentCreateOptions extends TransactionalOptions { + + private Boolean waitForSync; + private Boolean returnNew; + private Boolean returnOld; + private OverwriteMode overwriteMode; + private Boolean silent; + private Boolean mergeObjects; + private Boolean keepNull; + private Boolean refillIndexCaches; + private String versionAttribute; + + @Override + DocumentCreateOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public DocumentCreateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getReturnNew() { + return returnNew; + } + + /** + * @param returnNew Return additionally the complete new document under the attribute new in the result. + * @return options + */ + public DocumentCreateOptions returnNew(final Boolean returnNew) { + this.returnNew = returnNew; + return this; + } + + public Boolean getReturnOld() { + return returnOld; + } + + /** + * @param returnOld Additionally return the complete old document under the attribute old in the result. + * @return options + * @since ArangoDB 3.4 + */ + public DocumentCreateOptions returnOld(final Boolean returnOld) { + this.returnOld = returnOld; + return this; + } + + public OverwriteMode getOverwriteMode() { + return overwriteMode; + } + + /** + * @param overwriteMode This parameter can be set to replace or update. If given it sets implicitly the overwrite + * flag. In case it is set to update, the replace-insert becomes an update-insert. Otherwise + * this option follows the rules of the overwrite parameter. 
+ * @return options + * @since ArangoDB 3.7 + */ + public DocumentCreateOptions overwriteMode(final OverwriteMode overwriteMode) { + this.overwriteMode = overwriteMode; + return this; + } + + public Boolean getSilent() { + return silent; + } + + /** + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. + * @return options + */ + public DocumentCreateOptions silent(final Boolean silent) { + this.silent = silent; + return this; + } + + public Boolean getMergeObjects() { + return mergeObjects; + } + + /** + * Only considered if {@link DocumentCreateOptions#overwriteMode(OverwriteMode)} is set to {@link OverwriteMode#update} + * + * @param mergeObjects Controls whether objects (not arrays) will be merged if present in both the existing and + * the patch + * document. If set to false, the value in the patch document will overwrite the existing + * document's + * value. If set to true, objects will be merged. The default is true. + * @return options + * @since ArangoDB 3.7 + */ + public DocumentCreateOptions mergeObjects(Boolean mergeObjects) { + this.mergeObjects = mergeObjects; + return this; + } + + public Boolean getKeepNull() { + return keepNull; + } + + /** + * @param keepNull If the intention is to delete existing attributes with the update-insert command, the URL + * query parameter keepNull can be used with a value of false. This will modify the behavior of + * the patch command to remove any attributes from the existing document that are contained in + * the patch document with an attribute value of null. This option controls the update-insert + * behavior only. 
+ * @return options + * @since ArangoDB 3.7 + */ + public DocumentCreateOptions keepNull(Boolean keepNull) { + this.keepNull = keepNull; + return this; + } + + public Boolean getRefillIndexCaches() { + return refillIndexCaches; + } + + /** + * @param refillIndexCaches Whether to add a new entry to the in-memory edge cache if an edge document is inserted. + * @return options + * @since ArangoDB 3.11 + */ + public DocumentCreateOptions refillIndexCaches(Boolean refillIndexCaches) { + this.refillIndexCaches = refillIndexCaches; + return this; + } + + public String getVersionAttribute() { + return versionAttribute; + } + + /** + * Only applicable if {@link #overwriteMode(OverwriteMode)} is set to {@link OverwriteMode#update} or + * {@link OverwriteMode#replace}. + * You can use the {@code versionAttribute} option for external versioning support. + * If set, the attribute with the name specified by the option is looked up in the stored document and the attribute + * value is compared numerically to the value of the versioning attribute in the supplied document that is supposed + * to update/replace it. + * If the version number in the new document is higher (rounded down to a whole number) than in the document that + * already exists in the database, then the update/replace operation is performed normally. This is also the case if + * the new versioning attribute has a non-numeric value, if it is a negative number, or if the attribute doesn't + * exist in the supplied or stored document. + * If the version number in the new document is lower or equal to what exists in the database, the operation is not + * performed and the existing document thus not changed. No error is returned in this case. + * The attribute can only be a top-level attribute. + * You can check if _oldRev (if present) and _rev are different to determine if the document has been changed. 
+ * + * @param versionAttribute the attribute name to use for versioning + * @return options + * @since ArangoDB 3.12 + */ + public DocumentCreateOptions versionAttribute(String versionAttribute) { + this.versionAttribute = versionAttribute; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentDeleteOptions.java b/core/src/main/java/com/arangodb/model/DocumentDeleteOptions.java new file mode 100644 index 000000000..f179fd944 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentDeleteOptions.java @@ -0,0 +1,124 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentDeleteOptions extends TransactionalOptions { + + private Boolean waitForSync; + private String ifMatch; + private Boolean returnOld; + private Boolean silent; + private Boolean refillIndexCaches; + private Boolean ignoreRevs; + + @Override + DocumentDeleteOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until deletion operation has been synced to disk. 
+ * @return options + */ + public DocumentDeleteOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch remove a document based on a target revision + * @return options + */ + public DocumentDeleteOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + public Boolean getReturnOld() { + return returnOld; + } + + /** + * @param returnOld Return additionally the complete previous revision of the changed document under the + * attribute old in + * the result. + * @return options + */ + public DocumentDeleteOptions returnOld(final Boolean returnOld) { + this.returnOld = returnOld; + return this; + } + + public Boolean getSilent() { + return silent; + } + + /** + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. + * @return options + */ + public DocumentDeleteOptions silent(final Boolean silent) { + this.silent = silent; + return this; + } + + public Boolean getRefillIndexCaches() { + return refillIndexCaches; + } + + /** + * @param refillIndexCaches Whether to delete an existing entry from the in-memory edge cache and refill it with + * another edge if an edge document is removed. + * @return options + * @since ArangoDB 3.11 + */ + public DocumentDeleteOptions refillIndexCaches(Boolean refillIndexCaches) { + this.refillIndexCaches = refillIndexCaches; + return this; + } + + public Boolean getIgnoreRevs() { + return ignoreRevs; + } + + /** + * @param ignoreRevs If set to true, ignore any _rev attribute in the selectors. No revision check is performed. + * If set to false then revisions are checked. The default is true. 
+ * @return options + */ + public DocumentDeleteOptions ignoreRevs(final Boolean ignoreRevs) { + this.ignoreRevs = ignoreRevs; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/DocumentExistsOptions.java b/core/src/main/java/com/arangodb/model/DocumentExistsOptions.java new file mode 100644 index 000000000..03c533017 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentExistsOptions.java @@ -0,0 +1,63 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentExistsOptions extends TransactionalOptions { + + private String ifNoneMatch; + private String ifMatch; + + @Override + DocumentExistsOptions getThis() { + return this; + } + + public String getIfNoneMatch() { + return ifNoneMatch; + } + + /** + * @param ifNoneMatch document revision must not contain If-None-Match + * @return options + */ + public DocumentExistsOptions ifNoneMatch(final String ifNoneMatch) { + this.ifNoneMatch = ifNoneMatch; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch document revision must contain If-Match + * @return options + */ + public DocumentExistsOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentImportOptions.java b/core/src/main/java/com/arangodb/model/DocumentImportOptions.java new file mode 100644 index 000000000..42612a8e8 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentImportOptions.java @@ -0,0 +1,162 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class DocumentImportOptions { + + private String fromPrefix; + private String toPrefix; + private Boolean overwrite; + private Boolean waitForSync; + private OnDuplicate onDuplicate; + private Boolean complete; + private Boolean details; + + public DocumentImportOptions() { + super(); + } + + public String getFromPrefix() { + return fromPrefix; + } + + /** + * @param fromPrefix An optional prefix for the values in _from attributes. If specified, the value is automatically + * prepended to each _from input value. This allows specifying just the keys for _from. + * @return options + */ + public DocumentImportOptions fromPrefix(final String fromPrefix) { + this.fromPrefix = fromPrefix; + return this; + } + + public String getToPrefix() { + return toPrefix; + } + + /** + * @param toPrefix An optional prefix for the values in _to attributes. If specified, the value is automatically + * prepended to each _to input value. This allows specifying just the keys for _to. + * @return options + */ + public DocumentImportOptions toPrefix(final String toPrefix) { + this.toPrefix = toPrefix; + return this; + } + + public Boolean getOverwrite() { + return overwrite; + } + + /** + * @param overwrite If this parameter has a value of true, then all data in the collection will be removed prior + * to the + * import. Note that any existing index definitions will be preserved. + * @return options + */ + public DocumentImportOptions overwrite(final Boolean overwrite) { + this.overwrite = overwrite; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until documents have been synced to disk before returning. 
+ * @return options + */ + public DocumentImportOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public OnDuplicate getOnDuplicate() { + return onDuplicate; + } + + /** + * @param onDuplicate Controls what action is carried out in case of a unique key constraint violation. Possible + * values are: + *

    + *
  • error: this will not import the current document because of the unique key constraint + * violation. + * This is the default setting.
  • + *
  • update: this will update an existing document in the database with the data specified + * in the + * request. Attributes of the existing document that are not present in the request will be + * preserved.
  • + *
  • replace: this will replace an existing document in the database with the data specified + * in the + * request.
  • + *
  • ignore: this will not update an existing document and simply ignore the error caused by + * the unique + * key constraint violation. Note that update, replace and ignore will only work when the + * import document + * in the request contains the _key attribute. update and replace may also fail because of + * secondary + * unique key constraint violations.
  • + *
+ * @return options + */ + public DocumentImportOptions onDuplicate(final OnDuplicate onDuplicate) { + this.onDuplicate = onDuplicate; + return this; + } + + public Boolean getComplete() { + return complete; + } + + /** + * @param complete If set to true, it will make the whole import fail if any error occurs. Otherwise the import will + * continue even if some documents cannot be imported. + * @return options + */ + public DocumentImportOptions complete(final Boolean complete) { + this.complete = complete; + return this; + } + + public Boolean getDetails() { + return details; + } + + /** + * @param details If set to true, the result will include an attribute details with details about documents that + * could + * not be imported. + * @return options + */ + public DocumentImportOptions details(final Boolean details) { + this.details = details; + return this; + } + + public enum OnDuplicate { + error, update, replace, ignore + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentReadOptions.java b/core/src/main/java/com/arangodb/model/DocumentReadOptions.java new file mode 100644 index 000000000..74b976aae --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentReadOptions.java @@ -0,0 +1,80 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentReadOptions extends TransactionalOptions { + + private String ifNoneMatch; + private String ifMatch; + private Boolean allowDirtyRead; + + @Override + DocumentReadOptions getThis() { + return this; + } + + public String getIfNoneMatch() { + return ifNoneMatch; + } + + /** + * @param ifNoneMatch document revision must not contain If-None-Match + * @return options + */ + public DocumentReadOptions ifNoneMatch(final String ifNoneMatch) { + this.ifNoneMatch = ifNoneMatch; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch document revision must contain If-Match + * @return options + */ + public DocumentReadOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + /** + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return options + * @see API + * Documentation + * @since ArangoDB 3.4.0 + */ + public DocumentReadOptions allowDirtyRead(final Boolean allowDirtyRead) { + this.allowDirtyRead = allowDirtyRead; + return this; + } + + public Boolean getAllowDirtyRead() { + return allowDirtyRead; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentReplaceOptions.java b/core/src/main/java/com/arangodb/model/DocumentReplaceOptions.java new file mode 100644 index 000000000..f24d7fe13 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentReplaceOptions.java @@ -0,0 +1,170 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentReplaceOptions extends TransactionalOptions { + + private Boolean waitForSync; + private Boolean ignoreRevs; + private String ifMatch; + private Boolean returnNew; + private Boolean returnOld; + private Boolean silent; + private Boolean refillIndexCaches; + private String versionAttribute; + + @Override + DocumentReplaceOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public DocumentReplaceOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getIgnoreRevs() { + return ignoreRevs; + } + + /** + * @param ignoreRevs By default, or if this is set to true, the _rev attributes in the given document is ignored. + * If this + * is set to false, then the _rev attribute given in the body document is taken as a + * precondition. The + * document is only replaced if the current revision is the one specified. 
+ * @return options + */ + public DocumentReplaceOptions ignoreRevs(final Boolean ignoreRevs) { + this.ignoreRevs = ignoreRevs; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch replace a document based on target revision + * @return options + */ + public DocumentReplaceOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + public Boolean getReturnNew() { + return returnNew; + } + + /** + * @param returnNew Return additionally the complete new document under the attribute new in the result. + * @return options + */ + public DocumentReplaceOptions returnNew(final Boolean returnNew) { + this.returnNew = returnNew; + return this; + } + + public Boolean getReturnOld() { + return returnOld; + } + + /** + * @param returnOld Return additionally the complete previous revision of the changed document under the + * attribute old in + * the result. + * @return options + */ + public DocumentReplaceOptions returnOld(final Boolean returnOld) { + this.returnOld = returnOld; + return this; + } + + public Boolean getSilent() { + return silent; + } + + /** + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. + * @return options + */ + public DocumentReplaceOptions silent(final Boolean silent) { + this.silent = silent; + return this; + } + + public Boolean getRefillIndexCaches() { + return refillIndexCaches; + } + + /** + * @param refillIndexCaches Whether to update an existing entry in the in-memory edge cache if an edge document is + * replaced. 
+ * @return options + * @since ArangoDB 3.11 + */ + public DocumentReplaceOptions refillIndexCaches(Boolean refillIndexCaches) { + this.refillIndexCaches = refillIndexCaches; + return this; + } + + public String getVersionAttribute() { + return versionAttribute; + } + + /** + * You can use the {@code versionAttribute} option for external versioning support. + * If set, the attribute with the name specified by the option is looked up in the stored document and the attribute + * value is compared numerically to the value of the versioning attribute in the supplied document that is supposed + * to update/replace it. + * If the version number in the new document is higher (rounded down to a whole number) than in the document that + * already exists in the database, then the update/replace operation is performed normally. This is also the case if + * the new versioning attribute has a non-numeric value, if it is a negative number, or if the attribute doesn't + * exist in the supplied or stored document. + * If the version number in the new document is lower or equal to what exists in the database, the operation is not + * performed and the existing document thus not changed. No error is returned in this case. + * The attribute can only be a top-level attribute. + * You can check if _oldRev (if present) and _rev are different to determine if the document has been changed. 
+ * + * @param versionAttribute the attribute name to use for versioning + * @return options + * @since ArangoDB 3.12 + */ + public DocumentReplaceOptions versionAttribute(String versionAttribute) { + this.versionAttribute = versionAttribute; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/DocumentUpdateOptions.java b/core/src/main/java/com/arangodb/model/DocumentUpdateOptions.java new file mode 100644 index 000000000..9987e20f8 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/DocumentUpdateOptions.java @@ -0,0 +1,205 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class DocumentUpdateOptions extends TransactionalOptions { + + private Boolean keepNull; + private Boolean mergeObjects; + private Boolean waitForSync; + private Boolean ignoreRevs; + private String ifMatch; + private Boolean returnNew; + private Boolean returnOld; + private Boolean silent; + private Boolean refillIndexCaches; + private String versionAttribute; + + @Override + DocumentUpdateOptions getThis() { + return this; + } + + public Boolean getKeepNull() { + return keepNull; + } + + /** + * @param keepNull If the intention is to delete existing attributes with the patch command, the URL query parameter + * keepNull can be used with a value of false. This will modify the behavior of the patch command to + * remove any attributes from the existing document that are contained in the patch document with an + * attribute value of null. + * @return options + */ + public DocumentUpdateOptions keepNull(final Boolean keepNull) { + this.keepNull = keepNull; + return this; + } + + public Boolean getMergeObjects() { + return mergeObjects; + } + + /** + * @param mergeObjects Controls whether objects (not arrays) will be merged if present in both the existing and + * the patch + * document. If set to false, the value in the patch document will overwrite the existing + * document's + * value. If set to true, objects will be merged. The default is true. + * @return options + */ + public DocumentUpdateOptions mergeObjects(final Boolean mergeObjects) { + this.mergeObjects = mergeObjects; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. 
+ * @return options + */ + public DocumentUpdateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getIgnoreRevs() { + return ignoreRevs; + } + + /** + * @param ignoreRevs By default, or if this is set to true, the _rev attributes in the given document is ignored. + * If this + * is set to false, then the _rev attribute given in the body document is taken as a + * precondition. The + * document is only updated if the current revision is the one specified. + * @return options + */ + public DocumentUpdateOptions ignoreRevs(final Boolean ignoreRevs) { + this.ignoreRevs = ignoreRevs; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch update a document based on target revision + * @return options + */ + public DocumentUpdateOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + public Boolean getReturnNew() { + return returnNew; + } + + /** + * @param returnNew Return additionally the complete new document under the attribute new in the result. + * @return options + */ + public DocumentUpdateOptions returnNew(final Boolean returnNew) { + this.returnNew = returnNew; + return this; + } + + public Boolean getReturnOld() { + return returnOld; + } + + /** + * @param returnOld Return additionally the complete previous revision of the changed document under the + * attribute old in + * the result. + * @return options + */ + public DocumentUpdateOptions returnOld(final Boolean returnOld) { + this.returnOld = returnOld; + return this; + } + + public Boolean getSilent() { + return silent; + } + + /** + * @param silent If set to true, an empty object will be returned as response. No meta-data will be returned for the + * created document. This option can be used to save some network traffic. 
+ * @return options + */ + public DocumentUpdateOptions silent(final Boolean silent) { + this.silent = silent; + return this; + } + + public Boolean getRefillIndexCaches() { + return refillIndexCaches; + } + + /** + * @param refillIndexCaches Whether to update an existing entry in the in-memory edge cache if an edge document is + * updated. + * @return options + * @since ArangoDB 3.11 + */ + public DocumentUpdateOptions refillIndexCaches(Boolean refillIndexCaches) { + this.refillIndexCaches = refillIndexCaches; + return this; + } + + public String getVersionAttribute() { + return versionAttribute; + } + + /** + * You can use the {@code versionAttribute} option for external versioning support. + * If set, the attribute with the name specified by the option is looked up in the stored document and the attribute + * value is compared numerically to the value of the versioning attribute in the supplied document that is supposed + * to update/replace it. + * If the version number in the new document is higher (rounded down to a whole number) than in the document that + * already exists in the database, then the update/replace operation is performed normally. This is also the case if + * the new versioning attribute has a non-numeric value, if it is a negative number, or if the attribute doesn't + * exist in the supplied or stored document. + * If the version number in the new document is lower or equal to what exists in the database, the operation is not + * performed and the existing document thus not changed. No error is returned in this case. + * The attribute can only be a top-level attribute. + * You can check if _oldRev (if present) and _rev are different to determine if the document has been changed. 
+ * + * @param versionAttribute the attribute name to use for versioning + * @return options + * @since ArangoDB 3.12 + */ + public DocumentUpdateOptions versionAttribute(String versionAttribute) { + this.versionAttribute = versionAttribute; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/EdgeCollectionDropOptions.java b/core/src/main/java/com/arangodb/model/EdgeCollectionDropOptions.java new file mode 100644 index 000000000..6cc2d3a08 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeCollectionDropOptions.java @@ -0,0 +1,37 @@ +package com.arangodb.model; + +/** + * @deprecated use {@link EdgeCollectionRemoveOptions} instead + */ +@Deprecated +public class EdgeCollectionDropOptions { + private Boolean waitForSync; + private Boolean dropCollections; + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Define if the request should wait until synced to disk. + * @return this + */ + public EdgeCollectionDropOptions waitForSync(Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getDropCollections() { + return dropCollections; + } + + /** + * @param dropCollections Drop the collection as well. Collection will only be dropped if it is not used in other + * graphs. 
+ * @return this + */ + public EdgeCollectionDropOptions dropCollections(Boolean dropCollections) { + this.dropCollections = dropCollections; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/EdgeCollectionRemoveOptions.java b/core/src/main/java/com/arangodb/model/EdgeCollectionRemoveOptions.java new file mode 100644 index 000000000..c1245d833 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeCollectionRemoveOptions.java @@ -0,0 +1,33 @@ +package com.arangodb.model; + +public class EdgeCollectionRemoveOptions { + private Boolean waitForSync; + private Boolean dropCollections; + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Define if the request should wait until synced to disk. + * @return this + */ + public EdgeCollectionRemoveOptions waitForSync(Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getDropCollections() { + return dropCollections; + } + + /** + * @param dropCollections Drop the collection as well. Collection will only be dropped if it is not used in other + * graphs. + * @return this + */ + public EdgeCollectionRemoveOptions dropCollections(Boolean dropCollections) { + this.dropCollections = dropCollections; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/EdgeCreateOptions.java b/core/src/main/java/com/arangodb/model/EdgeCreateOptions.java new file mode 100644 index 000000000..5523a7dab --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeCreateOptions.java @@ -0,0 +1,48 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class EdgeCreateOptions extends TransactionalOptions { + + private Boolean waitForSync; + + @Override + EdgeCreateOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public EdgeCreateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/EdgeDeleteOptions.java b/core/src/main/java/com/arangodb/model/EdgeDeleteOptions.java new file mode 100644 index 000000000..25d7fab4f --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeDeleteOptions.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class EdgeDeleteOptions extends TransactionalOptions { + + private Boolean waitForSync; + private String ifMatch; + + @Override + EdgeDeleteOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until deletion operation has been synced to disk. + * @return options + */ + public EdgeDeleteOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch remove a document based on a target revision + * @return options + */ + public EdgeDeleteOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/EdgeReplaceOptions.java b/core/src/main/java/com/arangodb/model/EdgeReplaceOptions.java new file mode 100644 index 000000000..7e298d963 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeReplaceOptions.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class EdgeReplaceOptions extends TransactionalOptions { + + private Boolean waitForSync; + private String ifMatch; + + @Override + EdgeReplaceOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public EdgeReplaceOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch replace a document based on target revision + * @return options + */ + public EdgeReplaceOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/EdgeUpdateOptions.java b/core/src/main/java/com/arangodb/model/EdgeUpdateOptions.java new file mode 100644 index 000000000..03b2e00a1 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/EdgeUpdateOptions.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class EdgeUpdateOptions extends TransactionalOptions { + + private Boolean keepNull; + private Boolean waitForSync; + private String ifMatch; + + @Override + EdgeUpdateOptions getThis() { + return this; + } + + public Boolean getKeepNull() { + return keepNull; + } + + /** + * @param keepNull If the intention is to delete existing attributes with the patch command, the URL query parameter + * keepNull can be used with a value of false. This will modify the behavior of the patch command to + * remove any attributes from the existing document that are contained in the patch document with an + * attribute value of null. + * @return options + */ + public EdgeUpdateOptions keepNull(final Boolean keepNull) { + this.keepNull = keepNull; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public EdgeUpdateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch replace a document based on target revision + * @return options + */ + public EdgeUpdateOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java new file mode 100644 index 000000000..827670cf5 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java @@ -0,0 +1,616 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.internal.serde.UserDataInside; +import com.fasterxml.jackson.annotation.JsonIgnore; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +/** + * @author Michele Rastelli + */ +public final class ExplainAqlQueryOptions { + + private Map bindVars; + private String query; + private AqlQueryOptions.Options options; + + public ExplainAqlQueryOptions() { + super(); + } + + @UserDataInside + public Map getBindVars() { + return bindVars; + } + + /** + * @param bindVars key/value pairs representing the bind parameters + * @return options + */ + ExplainAqlQueryOptions bindVars(final Map bindVars) { + this.bindVars = bindVars; + return this; + } + + public String getQuery() { + return query; + } + + /** + * @param query the query which you want explained + * @return options + */ + ExplainAqlQueryOptions query(final String query) { + this.query = query; + return this; + } + + public AqlQueryOptions.Options getOptions() { + if (options == null) { + options = new AqlQueryOptions.Options(); + } + return options; + } + + public ExplainAqlQueryOptions options(final AqlQueryOptions.Options options) { + this.options = options; + return this; + } + + + // ------------------------------------ + // --- accessors for nested options --- + // ------------------------------------ + + @JsonIgnore + public Map getCustomOptions() { + return getOptions().getCustomOptions(); + } + + /** + * Set an additional custom option in the 
form of key-value pair. + * + * @param key option name + * @param value option value + * @return this + */ + public ExplainAqlQueryOptions customOption(String key, Object value) { + getOptions().setCustomOption(key, value); + return this; + } + + @JsonIgnore + public Boolean getAllPlans() { + return getOptions().getAllPlans(); + } + + /** + * @param value if set to true, all possible execution plans will be returned. The default is false, meaning only + * the optimal plan will be returned. + * @return this + */ + public ExplainAqlQueryOptions allPlans(final Boolean value) { + getOptions().setAllPlans(value); + return this; + } + + @JsonIgnore + public Boolean getAllowDirtyReads() { + return getOptions().getAllowDirtyReads(); + } + + /** + * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then + * the Coordinator is allowed to read from any shard replica and not only from the leader. + * You may observe data inconsistencies (dirty reads) when reading from followers, namely + * obsolete revisions of documents because changes have not yet been replicated to the + * follower, as well as changes to documents before they are officially committed on the + * leader. This feature is only available in the Enterprise Edition. + * @return this + */ + public ExplainAqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) { + getOptions().setAllowDirtyReads(allowDirtyReads); + return this; + } + + @JsonIgnore + public Boolean getAllowRetry() { + return getOptions().getAllowRetry(); + } + + /** + * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor. + *

+ * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in + * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} + * with cause {@link java.io.IOException}) + *

+ * If set to false (default), then it is not safe to retry invoking + * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to + * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the + * server). + *

+ * Note: once you successfully received the last batch, you should call + * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessary keep the + * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}). + * @return this + * @since ArangoDB 3.11 + */ + public ExplainAqlQueryOptions allowRetry(final Boolean allowRetry) { + getOptions().setAllowRetry(allowRetry); + return this; + } + + @JsonIgnore + public Boolean getFailOnWarning() { + return getOptions().getFailOnWarning(); + } + + /** + * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a + * warning. This option should be used during development to catch potential issues early. + * When the attribute is set to false, warnings will not be propagated to exceptions and will + * be returned with the query result. There is also a server configuration option + * --query.fail-on-warning for setting the default value for failOnWarning so it does not + * need to be set on a per-query level. + * @return this + */ + public ExplainAqlQueryOptions failOnWarning(final Boolean failOnWarning) { + getOptions().setFailOnWarning(failOnWarning); + return this; + } + + @JsonIgnore + public Boolean getFillBlockCache() { + return getOptions().getFillBlockCache(); + } + + /** + * @param fillBlockCache if set to true or not specified, this will make the query store + * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is + * usually the desired behavior. The option can be set to false for queries that + * are known to either read a lot of data that would thrash the block cache, or for queries + * that read data known to be outside of the hot set. By setting the option + * to false, data read by the query will not make it into the RocksDB block + * cache if it is not already in there, thus leaving more room for the actual hot set. 
+ * @return this + * @since ArangoDB 3.8.1 + */ + public ExplainAqlQueryOptions fillBlockCache(final Boolean fillBlockCache) { + getOptions().setFillBlockCache(fillBlockCache); + return this; + } + + @JsonIgnore + public String getForceOneShardAttributeValue() { + return getOptions().getForceOneShardAttributeValue(); + } + + /** + * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer + * cannot automatically detect that the query can be limited to only a single + * server (e.g. in a disjoint smart graph case). + *

+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the + * query may be shipped to a wrong DB server and may not return results (i.e. + * empty result set). + *

+ * Use at your own risk. + * @return this + */ + public ExplainAqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) { + getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue); + return this; + } + + @JsonIgnore + public Boolean getFullCount() { + return getOptions().getFullCount(); + } + + /** + * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra + * attribute + * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } + * } }. The + * fullCount attribute will contain the number of documents in the result before the last LIMIT + * in the + * query was applied. It can be used to count the number of documents that match certain filter + * criteria, + * but only return a subset of them, in one go. It is thus similar to MySQL's + * SQL_CALC_FOUND_ROWS hint. + * Note that setting the option will disable a few LIMIT optimizations and may lead to more + * documents + * being processed, and thus make queries run longer. Note that the fullCount attribute will + * only be + * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used + * in the + * query. + * @return this + */ + public ExplainAqlQueryOptions fullCount(final Boolean fullCount) { + getOptions().setFullCount(fullCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitCount() { + return getOptions().getIntermediateCommitCount(); + } + + /** + * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed + * automatically. Honored by + * the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { + getOptions().setIntermediateCommitCount(intermediateCommitCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitSize() { + return getOptions().getIntermediateCommitSize(); + } + + /** + * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed + * automatically. + * Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { + getOptions().setIntermediateCommitSize(intermediateCommitSize); + return this; + } + + @JsonIgnore + public Integer getMaxDNFConditionMembers() { + return getOptions().getMaxDNFConditionMembers(); + } + + /** + * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation + * of an AQL FILTER condition. + *

+ * Yon can use this option to limit the computation time and memory usage when + * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal + * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can + * take a large amount of processing time and memory. This query option limits the + * computation time and memory usage for such conditions. + *

+ * Once the threshold value is reached during the DNF conversion of a FILTER + * condition, the conversion is aborted, and the query continues with a simplified + * internal representation of the condition, which cannot be used for index lookups. + *

+ * You can set the threshold globally instead of per query with the + * --query.max-dnf-condition-members startup option. + * @return this + */ + public ExplainAqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) { + getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers); + return this; + } + + @JsonIgnore + public Integer getMaxNodesPerCallstack() { + return getOptions().getMaxNodesPerCallstack(); + } + + /** + * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is + * performed to avoid a potential stack overflow. Defaults to the configured value of + * the startup option --query.max-nodes-per-callstack. + *

+ * This option is only useful for testing and debugging and normally does not need any + * adjustment. + * @return this + */ + public ExplainAqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) { + getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack); + return this; + } + + @JsonIgnore + public Integer getMaxNumberOfPlans() { + return getOptions().getMaxNumberOfPlans(); + } + + /** + * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + */ + public ExplainAqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { + getOptions().setMaxNumberOfPlans(maxNumberOfPlans); + return this; + } + + @JsonIgnore + public Double getMaxRuntime() { + return getOptions().getMaxRuntime(); + } + + /** + * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified + * in seconds. The default value is 0.0 (no timeout). + * @return this + */ + public ExplainAqlQueryOptions maxRuntime(final Double maxRuntime) { + getOptions().setMaxRuntime(maxRuntime); + return this; + } + + @JsonIgnore + public Long getMaxTransactionSize() { + return getOptions().getMaxTransactionSize(); + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { + getOptions().setMaxTransactionSize(maxTransactionSize); + return this; + } + + @JsonIgnore + public Long getMaxWarningCount() { + return getOptions().getMaxWarningCount(); + } + + /** + * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a + * query will return + * is limited to 10 by default, but that number can be increased or decreased by setting + * this attribute. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions maxWarningCount(final Long maxWarningCount) { + getOptions().setMaxWarningCount(maxWarningCount); + return this; + } + + @JsonIgnore + public AqlQueryOptions.Optimizer getOptimizer() { + return getOptions().getOptimizer(); + } + + /** + * @param optimizer Options related to the query optimizer. + * @return this + */ + public ExplainAqlQueryOptions optimizer(final AqlQueryOptions.Optimizer optimizer) { + getOptions().setOptimizer(optimizer); + return this; + } + + @JsonIgnore + public Boolean getProfile() { + return getOptions().getProfile(); + } + + /** + * @param profile If set to true, then the additional query profiling information will be returned in the + * sub-attribute + * profile of the extra return attribute if the query result is not served from the query cache. + * @return this + */ + public ExplainAqlQueryOptions profile(final Boolean profile) { + getOptions().setProfile(profile); + return this; + } + + @JsonIgnore + public Double getSatelliteSyncWait() { + return getOptions().getSatelliteSyncWait(); + } + + /** + * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to + * bring the + * satellite collections involved in the query into sync. The default value is 60.0 + * (seconds). When the + * max time has been reached the query will be stopped. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { + getOptions().setSatelliteSyncWait(satelliteSyncWait); + return this; + } + + @JsonIgnore + public Collection getShardIds() { + return getOptions().getShardIds(); + } + + /** + * Restrict query to shards by given ids. This is an internal option. Use at your own risk. + * + * @param shardIds + * @return this + */ + public ExplainAqlQueryOptions shardIds(final String... 
shardIds) { + getOptions().setShardIds(Arrays.asList(shardIds)); + return this; + } + + @JsonIgnore + public Boolean getSkipInaccessibleCollections() { + return getOptions().getSkipInaccessibleCollections(); + } + + /** + * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a + * user has no access rights + * as if these collections were empty. Instead of returning a forbidden access + * error, your queries will + * execute normally. This is intended to help with certain use-cases: A graph + * contains several + * collections and different users execute AQL queries on that graph. You can + * now naturally limit the + * accessible results by changing the access rights of users on collections. + * This feature is only + * available in the Enterprise Edition. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { + getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdMemoryUsage() { + return getOptions().getSpillOverThresholdMemoryUsage(); + } + + /** + * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results + * temporarily on disk if the amount of memory used (in bytes) exceeds the + * specified value. This is used for decreasing the memory usage during the + * query execution. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path + * for the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 128MB. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned + * off by default. The query results are still built up entirely in RAM on + * Coordinators and single servers for non-streaming queries. To avoid the + * buildup of the entire query result in RAM, use a streaming query (see the + * stream option). + * @return this + */ + public ExplainAqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) { + getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdNumRows() { + return getOptions().getSpillOverThresholdNumRows(); + } + + /** + * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily + * on disk if the number of rows produced by the query exceeds the specified value. + * This is used for decreasing the memory usage during the query execution. In a + * query that iterates over a collection that contains documents, each row is a + * document, and in a query that iterates over temporary values + * (i.e. FOR i IN 1..100), each row is one of such temporary values. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path for + * the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 5000000 rows. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned off + * by default. The query results are still built up entirely in RAM on Coordinators + * and single servers for non-streaming queries. To avoid the buildup of the entire + * query result in RAM, use a streaming query (see the stream option). + * @return this + */ + public ExplainAqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) { + getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows); + return this; + } + + @JsonIgnore + public Boolean getStream() { + return getOptions().getStream(); + } + + @JsonIgnore + public Boolean getUsePlanCache() { + return getOptions().getUsePlanCache(); + } + + /** + * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not + * stored on + * the server, but calculated on the fly. Beware: long-running queries will need to hold the + * collection + * locks for as long as the query cursor exists. When set to false a query will be executed right + * away in + * its entirety. In that case query results are either returned right away (if the resultset is small + * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the + * ttl). It is advisable to only use this option on short-running queries or without exclusive locks + * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not + * work + * on streaming queries. Additionally query statistics, warnings and profiling data will only be + * available after the query is finished. The default value is false + * @return this + * @since ArangoDB 3.4.0 + */ + public ExplainAqlQueryOptions stream(final Boolean stream) { + getOptions().setStream(stream); + return this; + } + + /** + * @param usePlanCache Set this option to true to utilize a cached query plan or add the execution plan of this + * query to the cache if it’s not in the cache yet. 
Otherwise, the plan cache is bypassed + * (introduced in v3.12.4). + * Query plan caching can reduce the total time for processing queries by avoiding to parse, + * plan, and optimize queries over and over again that effectively have the same execution plan + * with at most some changes to bind parameter values. + * @return this + */ + public ExplainAqlQueryOptions usePlanCache(final Boolean usePlanCache) { + getOptions().setUsePlanCache(usePlanCache); + return this; + } + + @JsonIgnore + public Collection getRules() { + return getOptions().getOptimizer().getRules(); + } + + /** + * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, + * telling the + * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to + * enable + * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules + * @return this + */ + public ExplainAqlQueryOptions rules(final Collection rules) { + getOptions().getOptimizer().setRules(rules); + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/FulltextIndexOptions.java b/core/src/main/java/com/arangodb/model/FulltextIndexOptions.java new file mode 100644 index 000000000..6d34c2d65 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/FulltextIndexOptions.java @@ -0,0 +1,77 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + +/** + * @author Mark Vollmary + * @deprecated since ArangoDB 3.10, use ArangoSearch or Inverted indexes instead. + */ +@Deprecated +public final class FulltextIndexOptions extends IndexOptions { + + private final IndexType type = IndexType.fulltext; + private Iterable fields; + private Integer minLength; + + public FulltextIndexOptions() { + super(); + } + + @Override + FulltextIndexOptions getThis() { + return this; + } + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute paths + * @return options + */ + FulltextIndexOptions fields(final Iterable fields) { + this.fields = fields; + return this; + } + + public IndexType getType() { + return type; + } + + public Integer getMinLength() { + return minLength; + } + + /** + * @param minLength Minimum character length of words to index. Will default to a server-defined value if + * unspecified. It + * is thus recommended to set this value explicitly when creating the index. + * @return options + */ + public FulltextIndexOptions minLength(final Integer minLength) { + this.minLength = minLength; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/GeoIndexOptions.java b/core/src/main/java/com/arangodb/model/GeoIndexOptions.java new file mode 100644 index 000000000..8e87e2590 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/GeoIndexOptions.java @@ -0,0 +1,92 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + +/** + * @author Mark Vollmary + */ +public final class GeoIndexOptions extends IndexOptions { + + private final IndexType type = IndexType.geo; + private Iterable fields; + private Boolean geoJson; + private Boolean legacyPolygons; + + public GeoIndexOptions() { + super(); + } + + @Override + GeoIndexOptions getThis() { + return this; + } + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute paths + * @return options + */ + GeoIndexOptions fields(final Iterable fields) { + this.fields = fields; + return this; + } + + public IndexType getType() { + return type; + } + + public Boolean getGeoJson() { + return geoJson; + } + + /** + * @param geoJson If a geo-spatial index on a location is constructed and geoJson is true, then the order within the + * array is longitude followed by latitude. This corresponds to the format described in + * @return options + */ + public GeoIndexOptions geoJson(final Boolean geoJson) { + this.geoJson = geoJson; + return this; + } + + public Boolean getLegacyPolygons() { + return legacyPolygons; + } + + /** + * @param legacyPolygons If `true` will use the old rules (pre-3.10) for the parsing GeoJSON polygons. This + * allows you to let old indexes produce the same, potentially wrong results as before an + * upgrade. 
A geo index with `legacyPolygons` set to `false` will use the new, correct and + * consistent method for parsing of GeoJSON polygons. + * See Legacy Polygons. + * @return options + * @since ArangoDB 3.10 + */ + public GeoIndexOptions legacyPolygons(final Boolean legacyPolygons) { + this.legacyPolygons = legacyPolygons; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/GraphCreateOptions.java b/core/src/main/java/com/arangodb/model/GraphCreateOptions.java new file mode 100644 index 000000000..957a64221 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/GraphCreateOptions.java @@ -0,0 +1,280 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.ReplicationFactor; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class GraphCreateOptions { + private String name; + private Collection edgeDefinitions; + private Collection orphanCollections; + private Boolean isSmart; + private SmartOptions options; + private Boolean waitForSync; + + public GraphCreateOptions() { + super(); + } + + public String getName() { + return name; + } + + /** + * @param name Name of the graph + * @return options + */ + GraphCreateOptions name(final String name) { + this.name = name; + return this; + } + + public Collection getEdgeDefinitions() { + return edgeDefinitions; + } + + /** + * @param edgeDefinitions An array of definitions for the edge + * @return options + */ + GraphCreateOptions edgeDefinitions(final Collection edgeDefinitions) { + this.edgeDefinitions = edgeDefinitions; + return this; + } + + public Collection getOrphanCollections() { + return orphanCollections; + } + + /** + * @param orphanCollections Additional vertex collections + * @return options + */ + public GraphCreateOptions orphanCollections(final String... orphanCollections) { + this.orphanCollections = Arrays.asList(orphanCollections); + return this; + } + + public Boolean getIsSmart() { + return isSmart; + } + + /** + * @param isSmart Define if the created graph should be smart. This only has effect in Enterprise version. + * @return options + */ + public GraphCreateOptions isSmart(final Boolean isSmart) { + this.isSmart = isSmart; + return this; + } + + public Boolean getIsDisjoint() { + return getOptions().getIsDisjoint(); + } + + /** + * @param isDisjoint If set to true, a Disjoint SmartGraph will be created. This flag is not editable after + * creation. Default: false. 
+ * @return options + * @since ArangoDB 3.7 + */ + public GraphCreateOptions isDisjoint(final Boolean isDisjoint) { + getOptions().setIsDisjoint(isDisjoint); + return this; + } + + public ReplicationFactor getReplicationFactor() { + return getOptions().replicationFactor; + } + + /** + * @param replicationFactor (The default is 1): in a cluster, this attribute determines how many copies of each + * shard are kept on + * different DBServers. The value 1 means that only one copy (no synchronous + * replication) is kept. A + * value of k means that k-1 replicas are kept. Any two copies reside on different + * DBServers. Replication + * between them is synchronous, that is, every write operation to the "leader" copy will + * be replicated to + * all "follower" replicas, before the write operation is reported successful. If a + * server fails, this is + * detected automatically and one of the servers holding copies take over, usually + * without an error being + * reported. + * @return options + */ + public GraphCreateOptions replicationFactor(final ReplicationFactor replicationFactor) { + getOptions().setReplicationFactor(replicationFactor); + return this; + } + + public GraphCreateOptions replicationFactor(int replicationFactor) { + getOptions().setReplicationFactor(ReplicationFactor.of(replicationFactor)); + return this; + } + + public Integer getWriteConcern() { + return getOptions().getWriteConcern(); + } + + /** + * @param writeConcern Write concern for new collections in the graph. + * It determines how many copies of each shard are required to be in sync on the different + * DB-Servers. If there are less then these many copies in the cluster a shard will refuse to + * write. Writes to shards with enough up-to-date copies will succeed at the same time however. + * The value of writeConcern can not be larger than replicationFactor. 
(cluster only) + * @return options + */ + public GraphCreateOptions writeConcern(final Integer writeConcern) { + getOptions().setWriteConcern(writeConcern); + return this; + } + + public Integer getNumberOfShards() { + return getOptions().getNumberOfShards(); + } + + /** + * @param numberOfShards The number of shards that is used for every collection within this graph. Cannot be + * modified later. + * @return options + */ + public GraphCreateOptions numberOfShards(final Integer numberOfShards) { + getOptions().setNumberOfShards(numberOfShards); + return this; + } + + public String getSmartGraphAttribute() { + return getOptions().getSmartGraphAttribute(); + } + + /** + * @param smartGraphAttribute The attribute name that is used to smartly shard the vertices of a graph. Every + * vertex in this Graph + * has to have this attribute. Cannot be modified later. + * @return options + */ + public GraphCreateOptions smartGraphAttribute(final String smartGraphAttribute) { + getOptions().setSmartGraphAttribute(smartGraphAttribute); + return this; + } + + public Collection getSatellites() { + return getOptions().getSatellites(); + } + + /** + * @param satellites collection names that will be used to create SatelliteCollections + * for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only). Each array element + * must be a valid collection name. The collection type cannot be modified later. + * @return options + * @since ArangoDB 3.9.0 + */ + public GraphCreateOptions satellites(final String... 
satellites) { + getOptions().setSatellites(satellites); + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + public GraphCreateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public SmartOptions getOptions() { + if (options == null) { + options = new SmartOptions(); + } + return options; + } + + public static final class SmartOptions { + private String smartGraphAttribute; + private Collection satellites; + private Integer numberOfShards; + private ReplicationFactor replicationFactor; + private Integer writeConcern; + private Boolean isDisjoint; + + public SmartOptions() { + super(); + } + + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + public void setReplicationFactor(final ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + } + + public Integer getWriteConcern() { + return writeConcern; + } + + public void setWriteConcern(final Integer writeConcern) { + this.writeConcern = writeConcern; + } + + public Integer getNumberOfShards() { + return numberOfShards; + } + + public void setNumberOfShards(final Integer numberOfShards) { + this.numberOfShards = numberOfShards; + } + + public String getSmartGraphAttribute() { + return smartGraphAttribute; + } + + public void setSmartGraphAttribute(final String smartGraphAttribute) { + this.smartGraphAttribute = smartGraphAttribute; + } + + public Boolean getIsDisjoint() { + return isDisjoint; + } + + public void setIsDisjoint(final Boolean isDisjoint) { + this.isDisjoint = isDisjoint; + } + + public Collection getSatellites() { + return satellites; + } + + public void setSatellites(final String... 
satellites) { + this.satellites = Arrays.asList(satellites); + } + } + +} diff --git a/core/src/main/java/com/arangodb/model/GraphDocumentReadOptions.java b/core/src/main/java/com/arangodb/model/GraphDocumentReadOptions.java new file mode 100644 index 000000000..d68cdba7a --- /dev/null +++ b/core/src/main/java/com/arangodb/model/GraphDocumentReadOptions.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class GraphDocumentReadOptions extends TransactionalOptions { + + private String ifNoneMatch; + private String ifMatch; + private Boolean allowDirtyRead; + + @Override + GraphDocumentReadOptions getThis() { + return this; + } + + public String getIfNoneMatch() { + return ifNoneMatch; + } + + /** + * @param ifNoneMatch document revision must not contain If-None-Match + * @return options + */ + public GraphDocumentReadOptions ifNoneMatch(final String ifNoneMatch) { + this.ifNoneMatch = ifNoneMatch; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch document revision must contain If-Match + * @return options + */ + public GraphDocumentReadOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + /** + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return options + * @see API + * Documentation + * @since ArangoDB 3.4.0 + */ + public GraphDocumentReadOptions allowDirtyRead(final Boolean allowDirtyRead) { + this.allowDirtyRead = allowDirtyRead; + return this; + } + + public Boolean getAllowDirtyRead() { + return allowDirtyRead; + } + +} diff --git a/src/main/java/com/arangodb/model/ImportType.java b/core/src/main/java/com/arangodb/model/ImportType.java similarity index 93% rename from src/main/java/com/arangodb/model/ImportType.java rename to core/src/main/java/com/arangodb/model/ImportType.java index 3d7c82c1c..3cbd123e0 100644 --- a/src/main/java/com/arangodb/model/ImportType.java +++ b/core/src/main/java/com/arangodb/model/ImportType.java @@ -1,29 +1,28 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - */ -public enum ImportType { - documents, list, auto -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public enum ImportType { + documents, list, auto +} diff --git a/core/src/main/java/com/arangodb/model/IndexOptions.java b/core/src/main/java/com/arangodb/model/IndexOptions.java new file mode 100644 index 000000000..094547eea --- /dev/null +++ b/core/src/main/java/com/arangodb/model/IndexOptions.java @@ -0,0 +1,67 @@ +/* + * DISCLAIMER + * + * Copyright 2019 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.arch.NoRawTypesInspection; + +/** + * @author Heiko Kernbach + *

+ * This final class is used for all index similarities + */ +@NoRawTypesInspection +public abstract class IndexOptions> { + + private Boolean inBackground; + private String name; + + protected IndexOptions() { + } + + abstract T getThis(); + + /** + * @param inBackground create the the index in the background + * this is a RocksDB only flag. + * @return options + */ + public T inBackground(final Boolean inBackground) { + this.inBackground = inBackground; + return getThis(); + } + + public Boolean getInBackground() { + return inBackground; + } + + /** + * @param name the name of the index + * @return options + */ + public T name(final String name) { + this.name = name; + return getThis(); + } + + public String getName() { + return name; + } +} diff --git a/core/src/main/java/com/arangodb/model/InvertedIndexOptions.java b/core/src/main/java/com/arangodb/model/InvertedIndexOptions.java new file mode 100644 index 000000000..46a10eb84 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/InvertedIndexOptions.java @@ -0,0 +1,414 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; +import com.arangodb.entity.InvertedIndexField; +import com.arangodb.entity.InvertedIndexPrimarySort; +import com.arangodb.entity.arangosearch.*; + +import java.util.*; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class InvertedIndexOptions extends IndexOptions { + + private final IndexType type = IndexType.inverted; + private Integer parallelism; + private InvertedIndexPrimarySort primarySort; + private final Collection storedValues = new ArrayList<>(); + private final Collection optimizeTopK = new ArrayList<>(); + private String analyzer; + private final Set features = new HashSet<>(); + private Boolean includeAllFields; + private Boolean trackListPositions; + private Boolean searchField; + private final Collection fields = new ArrayList<>(); + private Long consolidationIntervalMsec; + private Long commitIntervalMsec; + private Long cleanupIntervalStep; + private ConsolidationPolicy consolidationPolicy; + private Long writebufferIdle; + private Long writebufferActive; + private Long writebufferSizeMax; + private Boolean cache; + private Boolean primaryKeyCache; + + public InvertedIndexOptions() { + super(); + } + + @Override + InvertedIndexOptions getThis() { + return this; + } + + public IndexType getType() { + return type; + } + + public Integer getParallelism() { + return parallelism; + } + + /** + * @param parallelism The number of threads to use for indexing the fields. Default: 2 + * @return this + */ + public InvertedIndexOptions parallelism(Integer parallelism) { + this.parallelism = parallelism; + return this; + } + + public InvertedIndexPrimarySort getPrimarySort() { + return primarySort; + } + + /** + * @param primarySort You can define a primary sort order to enable an AQL optimization. 
If a query iterates over + * all documents of a collection, wants to sort them by attribute values, and the (left-most) + * fields to sort by, as well as their sorting direction, match with the primarySort definition, + * then the SORT operation is optimized away. + * @return this + */ + public InvertedIndexOptions primarySort(InvertedIndexPrimarySort primarySort) { + this.primarySort = primarySort; + return this; + } + + public Collection getStoredValues() { + return storedValues; + } + + /** + * @param storedValues The optional storedValues attribute can contain an array of paths to additional attributes to + * store in the index. These additional attributes cannot be used for index lookups or for + * sorting, but they can be used for projections. This allows an index to fully cover more + * queries and avoid extra document lookups. + * @return this + */ + public InvertedIndexOptions storedValues(StoredValue... storedValues) { + Collections.addAll(this.storedValues, storedValues); + return this; + } + + public Collection getOptimizeTopK() { + return optimizeTopK; + } + + /** + * @param optimizeTopK An array of strings defining sort expressions that you want to optimize. + * @return options + * @since ArangoDB 3.11, Enterprise Edition only + */ + public InvertedIndexOptions optimizeTopK(String... optimizeTopK) { + Collections.addAll(this.optimizeTopK, optimizeTopK); + return this; + } + + public String getAnalyzer() { + return analyzer; + } + + /** + * @param analyzer The name of an Analyzer to use by default. This Analyzer is applied to the values of the indexed + * fields for which you don’t define Analyzers explicitly. + * @return this + */ + public InvertedIndexOptions analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + public Set getFeatures() { + return features; + } + + /** + * @param features A list of Analyzer features to use by default. They define what features are enabled for the + * default analyzer. 
+ * @return this + */ + public InvertedIndexOptions features(AnalyzerFeature... features) { + Collections.addAll(this.features, features); + return this; + } + + public Boolean getIncludeAllFields() { + return includeAllFields; + } + + /** + * @param includeAllFields This option only applies if you use the inverted index in a search-alias Views. If set to + * true, then all sub-attributes of this field are indexed, excluding any sub-attributes + * that are configured separately by other elements in the fields array (and their + * sub-attributes). The analyzer and features properties apply to the sub-attributes. If set + * to false, then sub-attributes are ignored. The default value is defined by the top-level + * includeAllFields option, or false if not set. + * @return this + */ + public InvertedIndexOptions includeAllFields(Boolean includeAllFields) { + this.includeAllFields = includeAllFields; + return this; + } + + public Boolean getTrackListPositions() { + return trackListPositions; + } + + /** + * @param trackListPositions This option only applies if you use the inverted index in a search-alias Views. If set + * to true, then track the value position in arrays for array values. For example, when + * querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to + * specify the array element, e.g. doc.attr[1] == "valueY". If set to false, all values in + * an array are treated as equal alternatives. You don’t specify an array element in + * queries, e.g. doc.attr == "valueY", and all elements are searched for a match. Default: + * the value defined by the top-level trackListPositions option, or false if not set. 
+ * @return this + */ + public InvertedIndexOptions trackListPositions(Boolean trackListPositions) { + this.trackListPositions = trackListPositions; + return this; + } + + public Boolean getSearchField() { + return searchField; + } + + /** + * @param searchField This option only applies if you use the inverted index in a search-alias Views. You can set + * the option to true to get the same behavior as with arangosearch Views regarding the indexing + * of array values as the default. If enabled, both, array and primitive values (strings, + * numbers, etc.) are accepted. Every element of an array is indexed according to the + * trackListPositions option. If set to false, it depends on the attribute path. If it explicitly + * expand an array ([*]), then the elements are indexed separately. Otherwise, the array is + * indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an + * array expansion if searchField is enabled. + * @return this + */ + public InvertedIndexOptions searchField(Boolean searchField) { + this.searchField = searchField; + return this; + } + + public Collection getFields() { + return fields; + } + + /** + * @param fields An array of attribute paths as strings to index the fields with the default options, or objects to + * specify options for the fields. + * @return this + */ + public InvertedIndexOptions fields(InvertedIndexField... fields) { + Collections.addAll(this.fields, fields); + return this; + } + + public Long getConsolidationIntervalMsec() { + return consolidationIntervalMsec; + } + + /** + * @param consolidationIntervalMsec Wait at least this many milliseconds between applying β€˜consolidationPolicy’ to + * consolidate View data store and possibly release space on the filesystem + * (default: 1000, to disable use: 0). For the case where there are a lot of data + * modification operations, a higher value could potentially have the data store + * consume more space and file handles. 
For the case where there are a few data + * modification operations, a lower value will impact performance due to no segment + * candidates available for consolidation. Background: For data modification + * ArangoSearch Views follow the concept of a β€œversioned data store”. Thus old + * versions of data may be removed once there are no longer any users of the old + * data. The frequency of the cleanup and compaction operations are governed by + * β€˜consolidationIntervalMsec’ and the candidates for compaction are selected via + * β€˜consolidationPolicy’. + * @return this + */ + public InvertedIndexOptions consolidationIntervalMsec(Long consolidationIntervalMsec) { + this.consolidationIntervalMsec = consolidationIntervalMsec; + return this; + } + + public Long getCommitIntervalMsec() { + return commitIntervalMsec; + } + + /** + * @param commitIntervalMsec Wait at least this many milliseconds between committing View data store changes and + * making documents visible to queries (default: 1000, to disable use: 0). For the case + * where there are a lot of inserts/updates, a lower value, until commit, will cause the + * index not to account for them and memory usage would continue to grow. For the case + * where there are a few inserts/updates, a higher value will impact performance and waste + * disk space for each commit call without any added benefits. Background: For data + * retrieval ArangoSearch Views follow the concept of β€œeventually-consistent”, i.e. + * eventually all the data in ArangoDB will be matched by corresponding query expressions. + * The concept of ArangoSearch View β€œcommit” operation is introduced to control the + * upper-bound on the time until document addition/removals are actually reflected by + * corresponding query expressions. 
Once a β€œcommit” operation is complete all documents + * added/removed prior to the start of the β€œcommit” operation will be reflected by queries + * invoked in subsequent ArangoDB transactions, in-progress ArangoDB transactions will + * still continue to return a repeatable-read state. + * @return this + */ + public InvertedIndexOptions commitIntervalMsec(Long commitIntervalMsec) { + this.commitIntervalMsec = commitIntervalMsec; + return this; + } + + public Long getCleanupIntervalStep() { + return cleanupIntervalStep; + } + + /** + * @param cleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data + * directory (default: 2, to disable use: 0). For the case where the consolidation + * policies merge segments often (i.e. a lot of commit+consolidate), a lower value will + * cause a lot of disk space to be wasted. For the case where the consolidation policies + * rarely merge segments (i.e. few inserts/deletes), a higher value will impact + * performance without any added benefits. Background: With every β€œcommit” or + * β€œconsolidate” operation a new state of the View internal data-structures is created on + * disk. Old states/snapshots are released once there are no longer any users remaining. + * However, the files for the released states/snapshots are left on disk, and only + * removed by β€œcleanup” operation. + * @return this + */ + public InvertedIndexOptions cleanupIntervalStep(Long cleanupIntervalStep) { + this.cleanupIntervalStep = cleanupIntervalStep; + return this; + } + + public ConsolidationPolicy getConsolidationPolicy() { + return consolidationPolicy; + } + + /** + * @param consolidationPolicy The consolidation policy to apply for selecting which segments should be merged + * (default: {}). Background: With each ArangoDB transaction that inserts documents one + * or more ArangoSearch internal segments gets created. 
Similarly for removed documents + * the segments that contain such documents will have these documents marked as + * β€˜deleted’. Over time this approach causes a lot of small and sparse segments to be + * created. A β€œconsolidation” operation selects one or more segments and copies all of + * their valid documents into a single new segment, thereby allowing the search algorithm + * to perform more optimally and for extra file handles to be released once old segments + * are no longer used. + * @return this + */ + public InvertedIndexOptions consolidationPolicy(ConsolidationPolicy consolidationPolicy) { + this.consolidationPolicy = consolidationPolicy; + return this; + } + + public Long getWritebufferIdle() { + return writebufferIdle; + } + + /** + * @param writebufferIdle Maximum number of writers (segments) cached in the pool (default: 64, use 0 to disable) + * @return this + */ + public InvertedIndexOptions writebufferIdle(Long writebufferIdle) { + this.writebufferIdle = writebufferIdle; + return this; + } + + public Long getWritebufferActive() { + return writebufferActive; + } + + /** + * @param writebufferActive Maximum number of concurrent active writers (segments) that perform a transaction. Other + * writers (segments) wait till current active writers (segments) finish (default: 0, use 0 + * to disable) + * @return this + */ + public InvertedIndexOptions writebufferActive(Long writebufferActive) { + this.writebufferActive = writebufferActive; + return this; + } + + public Long getWritebufferSizeMax() { + return writebufferSizeMax; + } + + /** + * @param writebufferSizeMax Maximum memory byte size per writer (segment) before a writer (segment) flush is + * triggered. 0 value turns off this limit for any writer (buffer) and data will be + * flushed periodically based on the value defined for the flush thread (ArangoDB server + * startup option). 
0 value should be used carefully due to high potential memory + * consumption (default: 33554432, use 0 to disable) + * @return this + */ + public InvertedIndexOptions writebufferSizeMax(Long writebufferSizeMax) { + this.writebufferSizeMax = writebufferSizeMax; + return this; + } + + public Boolean getCache() { + return cache; + } + + /** + * @param cache Enable this option to always cache the field normalization values in memory for all fields by + * default. This can improve the performance of scoring and ranking queries. Otherwise, these values + * are memory-mapped and it is up to the operating system to load them from disk into memory and to + * evict them from memory. + *

+ * Default: `false`. (Enterprise Edition only) + * @return this + * @since ArangoDB 3.10.2 + */ + public InvertedIndexOptions cache(Boolean cache) { + this.cache = cache; + return this; + } + + public Boolean getPrimaryKeyCache() { + return primaryKeyCache; + } + + /** + * @param primaryKeyCache If you enable this option, then the primary key columns are always cached in memory. This + * can improve the performance of queries that return many documents. Otherwise, these values + * are memory-mapped and it is up to the operating system to load them from disk into memory + * and to evict them from memory (Enterprise Edition only). (default: false) + * @return this + * @since ArangoDB 3.10.2 + */ + public InvertedIndexOptions primaryKeyCache(Boolean primaryKeyCache) { + this.primaryKeyCache = primaryKeyCache; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InvertedIndexOptions that = (InvertedIndexOptions) o; + return type == that.type && Objects.equals(parallelism, that.parallelism) && Objects.equals(primarySort, that.primarySort) && Objects.equals(storedValues, that.storedValues) && Objects.equals(analyzer, that.analyzer) && Objects.equals(features, that.features) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(trackListPositions, that.trackListPositions) && Objects.equals(searchField, that.searchField) && Objects.equals(fields, that.fields) && Objects.equals(consolidationIntervalMsec, that.consolidationIntervalMsec) && Objects.equals(commitIntervalMsec, that.commitIntervalMsec) && Objects.equals(cleanupIntervalStep, that.cleanupIntervalStep) && Objects.equals(consolidationPolicy, that.consolidationPolicy) && Objects.equals(writebufferIdle, that.writebufferIdle) && Objects.equals(writebufferActive, that.writebufferActive) && Objects.equals(writebufferSizeMax, that.writebufferSizeMax) && Objects.equals(cache, that.cache) && 
Objects.equals(primaryKeyCache, that.primaryKeyCache); + } + + @Override + public int hashCode() { + return Objects.hash(type, parallelism, primarySort, storedValues, analyzer, features, includeAllFields, trackListPositions, searchField, fields, consolidationIntervalMsec, commitIntervalMsec, cleanupIntervalStep, consolidationPolicy, writebufferIdle, writebufferActive, writebufferSizeMax, cache, primaryKeyCache); + } +} diff --git a/core/src/main/java/com/arangodb/model/LogLevelOptions.java b/core/src/main/java/com/arangodb/model/LogLevelOptions.java new file mode 100644 index 000000000..9ce6ca52e --- /dev/null +++ b/core/src/main/java/com/arangodb/model/LogLevelOptions.java @@ -0,0 +1,14 @@ +package com.arangodb.model; + +public class LogLevelOptions { + private String serverId; + + public String getServerId() { + return serverId; + } + + public LogLevelOptions serverId(final String serverId) { + this.serverId = serverId; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/LogOptions.java b/core/src/main/java/com/arangodb/model/LogOptions.java new file mode 100644 index 000000000..b6cc65f8a --- /dev/null +++ b/core/src/main/java/com/arangodb/model/LogOptions.java @@ -0,0 +1,149 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.LogLevel; + +/** + * @author Mark Vollmary + */ +public final class LogOptions { + + public static final String PROPERTY_UPTO = "upto"; + public static final String PROPERTY_LEVEL = "level"; + public static final String PROPERTY_START = "start"; + public static final String PROPERTY_SIZE = "size"; + public static final String PROPERTY_OFFSET = "offset"; + public static final String PROPERTY_SEARCH = "search"; + public static final String PROPERTY_SORT = "sort"; + private LogLevel upto; + private LogLevel level; + private Long start; + private Integer size; + private Integer offset; + private String search; + private SortOrder sort; + + public LogOptions() { + super(); + } + + public LogLevel getUpto() { + return upto; + } + + /** + * @param upto Returns all log entries up to log level upto + * @return options + */ + public LogOptions upto(final LogLevel upto) { + this.upto = upto; + return this; + } + + public LogLevel getLevel() { + return level; + } + + /** + * @param level Returns all log entries of log level level. 
Note that the query parameters upto and level are + * mutually + * exclusive + * @return options + */ + public LogOptions level(final LogLevel level) { + this.level = level; + return this; + } + + public Long getStart() { + return start; + } + + /** + * @param start Returns all log entries such that their log entry identifier (lid value) is greater or equal to + * start + * @return options + */ + public LogOptions start(final Long start) { + this.start = start; + return this; + } + + public Integer getSize() { + return size; + } + + /** + * @param size Restricts the result to at most size log entries + * @return options + */ + public LogOptions size(final Integer size) { + this.size = size; + return this; + } + + public Integer getOffset() { + return offset; + } + + /** + * @param offset Starts to return log entries skipping the first offset log entries. offset and size can be used for + * pagination + * @return options + */ + public LogOptions offset(final Integer offset) { + this.offset = offset; + return this; + } + + public String getSearch() { + return search; + } + + /** + * @param search Only return the log entries containing the text specified in search + * @return options + */ + public LogOptions search(final String search) { + this.search = search; + return this; + } + + public SortOrder getSort() { + return sort; + } + + /** + * @param sort Sort the log entries either ascending (if sort is asc) or descending (if sort is desc) according to + * their lid values. Note that the lid imposes a chronological order. 
The default value is asc + * @return options + */ + public LogOptions sort(final SortOrder sort) { + this.sort = sort; + return this; + } + + public enum SortOrder { + asc, desc + } + +} diff --git a/core/src/main/java/com/arangodb/model/MDIFieldValueTypes.java b/core/src/main/java/com/arangodb/model/MDIFieldValueTypes.java new file mode 100644 index 000000000..1bf3fcb10 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/MDIFieldValueTypes.java @@ -0,0 +1,8 @@ +package com.arangodb.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public enum MDIFieldValueTypes { + @JsonProperty("double") + DOUBLE +} diff --git a/core/src/main/java/com/arangodb/model/MDIndexOptions.java b/core/src/main/java/com/arangodb/model/MDIndexOptions.java new file mode 100644 index 000000000..c269b9cb8 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/MDIndexOptions.java @@ -0,0 +1,46 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + + +/** + * @author Michele Rastelli + * @since ArangoDB 3.12 + */ +public final class MDIndexOptions extends AbstractMDIndexOptions { + + public MDIndexOptions() { + super(); + } + + @Override + public IndexType getType() { + return IndexType.mdi; + } + + @Override + MDIndexOptions getThis() { + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/MDPrefixedIndexOptions.java b/core/src/main/java/com/arangodb/model/MDPrefixedIndexOptions.java new file mode 100644 index 000000000..cf10a3444 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/MDPrefixedIndexOptions.java @@ -0,0 +1,61 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + + +/** + * @author Michele Rastelli + * @since ArangoDB 3.12 + */ +public final class MDPrefixedIndexOptions extends AbstractMDIndexOptions { + + private Iterable prefixFields; + + public MDPrefixedIndexOptions() { + super(); + } + + public Iterable getPrefixFields() { + return prefixFields; + } + + /** + * @param prefixFields An array of attribute names used as search prefix. Array expansions are not allowed. 
+ * @return options + */ + public MDPrefixedIndexOptions prefixFields(final Iterable prefixFields) { + this.prefixFields = prefixFields; + return this; + } + + @Override + public IndexType getType() { + return IndexType.mdiPrefixed; + } + + @Override + MDPrefixedIndexOptions getThis() { + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/OptionsBuilder.java b/core/src/main/java/com/arangodb/model/OptionsBuilder.java new file mode 100644 index 000000000..1c6d4dfc6 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/OptionsBuilder.java @@ -0,0 +1,140 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.Permissions; +import com.arangodb.entity.ViewType; + +import java.util.ArrayList; +import java.util.Map; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class OptionsBuilder { + + private OptionsBuilder() { + super(); + } + + public static UserCreateOptions build(final UserCreateOptions options, final String user, final String passwd) { + return options.user(user).passwd(passwd); + } + + public static PersistentIndexOptions build(final PersistentIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + public static GeoIndexOptions build(final GeoIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + @Deprecated + public static FulltextIndexOptions build(final FulltextIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + public static TtlIndexOptions build(final TtlIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + public static ZKDIndexOptions build(final ZKDIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + public static AbstractMDIndexOptions build(final AbstractMDIndexOptions options, final Iterable fields) { + return options.fields(fields); + } + + public static CollectionCreateOptions build(final CollectionCreateOptions options, final String name) { + return options.name(name); + } + + public static AqlQueryOptions build(final AqlQueryOptions options, final String query, + final Map bindVars) { + return options.query(query).bindVars(bindVars); + } + + public static AqlQueryExplainOptions build( + final AqlQueryExplainOptions options, + final String query, + final Map bindVars) { + return options.query(query).bindVars(bindVars); + } + + public static ExplainAqlQueryOptions build( + final 
ExplainAqlQueryOptions options, + final String query, + final Map bindVars) { + return options.query(query).bindVars(bindVars); + } + + public static AqlQueryParseOptions build(final AqlQueryParseOptions options, final String query) { + return options.query(query); + } + + public static GraphCreateOptions build( + final GraphCreateOptions options, + final String name, + final Iterable edgeDefinitions) { + ArrayList edCol = new ArrayList<>(); + if (edgeDefinitions != null) { + edgeDefinitions.forEach(edCol::add); + } + return options.name(name).edgeDefinitions(edCol); + } + + public static TransactionOptions build(final TransactionOptions options, final String action) { + return options.action(action); + } + + public static CollectionRenameOptions build(final CollectionRenameOptions options, final String name) { + return options.name(name); + } + + public static UserAccessOptions build(final UserAccessOptions options, final Permissions grant) { + return options.grant(grant); + } + + public static AqlFunctionCreateOptions build( + final AqlFunctionCreateOptions options, + final String name, + final String code) { + return options.name(name).code(code); + } + + public static VertexCollectionCreateOptions build( + final VertexCollectionCreateOptions options, + final String collection) { + return options.collection(collection); + } + + public static ViewCreateOptions build(final ViewCreateOptions options, final String name, final ViewType type) { + return options.name(name).type(type); + } + + public static ViewRenameOptions build(final ViewRenameOptions options, final String name) { + return options.name(name); + } + +} diff --git a/core/src/main/java/com/arangodb/model/OverwriteMode.java b/core/src/main/java/com/arangodb/model/OverwriteMode.java new file mode 100644 index 000000000..9e09d4d52 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/OverwriteMode.java @@ -0,0 +1,67 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + + +/** + * @author Michele Rastelli + * @since ArangoDB 3.7 + */ +public enum OverwriteMode { + + /** + * if a document with the specified _key value exists already, nothing will be done and no write operation will be + * carried out. The insert operation will return success in this case. This mode does not support returning the old + * document version using RETURN OLD. When using RETURN NEW, null will be returned in case the document already + * existed. + */ + ignore("ignore"), + + /** + * if a document with the specified _key value exists already, it will be overwritten with the specified document + * value. This mode will also be used when no overwrite mode is specified but the overwrite flag is set to true. + */ + replace("replace"), + + /** + * if a document with the specified _key value exists already, it will be patched (partially updated) with the + * specified document value. The overwrite mode can be further controlled via the keepNull and mergeObjects + * parameters. + */ + update("update"), + + /** + * if a document with the specified _key value exists already, return a unique constraint violation error so that + * the insert operation fails. This is also the default behavior in case the overwrite mode is not set, and the + * overwrite flag is false or not set either. 
+ */ + conflict("conflict"); + + private final String value; + + OverwriteMode(String value) { + this.value = value; + } + + public String getValue() { + return value; + } +} diff --git a/core/src/main/java/com/arangodb/model/PersistentIndexOptions.java b/core/src/main/java/com/arangodb/model/PersistentIndexOptions.java new file mode 100644 index 000000000..cbb5b6f06 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/PersistentIndexOptions.java @@ -0,0 +1,156 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; + +/** + * @author Mark Vollmary + */ +public final class PersistentIndexOptions extends IndexOptions { + + private final IndexType type = IndexType.persistent; + private Iterable fields; + private Boolean unique; + private Boolean sparse; + private Boolean deduplicate; + private Boolean estimates; + private Boolean cacheEnabled; + private Collection storedValues; + + public PersistentIndexOptions() { + super(); + } + + @Override + PersistentIndexOptions getThis() { + return this; + } + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute paths + * @return options + */ + PersistentIndexOptions fields(final Iterable fields) { + this.fields = fields; + return this; + } + + public IndexType getType() { + return type; + } + + public Boolean getUnique() { + return unique; + } + + /** + * @param unique if true, then create a unique index + * @return options + */ + public PersistentIndexOptions unique(final Boolean unique) { + this.unique = unique; + return this; + } + + public Boolean getSparse() { + return sparse; + } + + /** + * @param sparse if true, then create a sparse index + * @return options + */ + public PersistentIndexOptions sparse(final Boolean sparse) { + this.sparse = sparse; + return this; + } + + public Boolean getDeduplicate() { + return deduplicate; + } + + /** + * @param deduplicate if false, the deduplication of array values is turned off. Default: {@code true} + * @return options + */ + public PersistentIndexOptions deduplicate(final Boolean deduplicate) { + this.deduplicate = deduplicate; + return this; + } + + /** + * @param estimates This attribute controls whether index selectivity estimates are maintained for the index. 
+ * Default: {@code + * true} + * @since ArangoDB 3.8 + */ + public PersistentIndexOptions estimates(final Boolean estimates) { + this.estimates = estimates; + return this; + } + + public Boolean getEstimates() { + return estimates; + } + + /** + * @param cacheEnabled enables in-memory caching of index entries + * @return options + * @since ArangoDB 3.10 + */ + public PersistentIndexOptions cacheEnabled(final Boolean cacheEnabled) { + this.cacheEnabled = cacheEnabled; + return this; + } + + public Boolean getCacheEnabled() { + return cacheEnabled; + } + + public Collection getStoredValues() { + return storedValues; + } + + /** + * @param storedValues (optional) array of paths to additional attributes to store in the index. These additional + * attributes cannot be used for index lookups or for sorting, but they can be used for + * projections. This allows an index to fully cover more queries and avoid extra document + * lookups. The maximum number of attributes in `storedValues` is 32. + * @return options + */ + public PersistentIndexOptions storedValues(final String... storedValues) { + if (this.storedValues == null) { + this.storedValues = new HashSet<>(); + } + Collections.addAll(this.storedValues, storedValues); + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/QueueTimeSample.java b/core/src/main/java/com/arangodb/model/QueueTimeSample.java new file mode 100644 index 000000000..453e5d4a9 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/QueueTimeSample.java @@ -0,0 +1,40 @@ +package com.arangodb.model; + +import java.util.Objects; + +/** + * Represents an observed value of the server queue latency, as returned from the "X-Arango-Queue-Time-Seconds" response + * header. + * This header contains the most recent request (de)queuing time (in seconds) as tracked by the server’s scheduler. + * + * @author Michele Rastelli + */ +public final class QueueTimeSample { + /** + * Unix-timestamp in milliseconds, recorded at client side. 
+ */ + public final long timestamp; + + /** + * Observed value. + */ + public final double value; + + public QueueTimeSample(long timestamp, double value) { + this.timestamp = timestamp; + this.value = value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueueTimeSample that = (QueueTimeSample) o; + return timestamp == that.timestamp && Double.compare(that.value, value) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, value); + } +} diff --git a/core/src/main/java/com/arangodb/model/ReplaceEdgeDefinitionOptions.java b/core/src/main/java/com/arangodb/model/ReplaceEdgeDefinitionOptions.java new file mode 100644 index 000000000..3b9a1cb0e --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ReplaceEdgeDefinitionOptions.java @@ -0,0 +1,33 @@ +package com.arangodb.model; + +public class ReplaceEdgeDefinitionOptions { + private Boolean waitForSync; + private Boolean dropCollections; + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Define if the request should wait until synced to disk. + * @return this + */ + public ReplaceEdgeDefinitionOptions waitForSync(Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public Boolean getDropCollections() { + return dropCollections; + } + + /** + * @param dropCollections Drop the collection as well. Collection will only be dropped if it is not used in other + * graphs. 
+ * @return this + */ + public ReplaceEdgeDefinitionOptions dropCollections(Boolean dropCollections) { + this.dropCollections = dropCollections; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/StreamTransactionOptions.java b/core/src/main/java/com/arangodb/model/StreamTransactionOptions.java new file mode 100644 index 000000000..bd78d6c4f --- /dev/null +++ b/core/src/main/java/com/arangodb/model/StreamTransactionOptions.java @@ -0,0 +1,173 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + * @since ArangoDB 3.5.0 + */ +public final class StreamTransactionOptions { + + private final TransactionCollectionOptions collections; + private Integer lockTimeout; + private Boolean waitForSync; + private Long maxTransactionSize; + private Boolean allowImplicit; + @JsonIgnore + private Boolean allowDirtyRead; + private Boolean skipFastLockRound; + + public StreamTransactionOptions() { + super(); + collections = new TransactionCollectionOptions(); + } + + public TransactionCollectionOptions getCollections() { + return collections; + } + + public Integer getLockTimeout() { + return lockTimeout; + } + + /** + * @param lockTimeout a numeric value that can be used to set a timeout in seconds for + * waiting on collection locks. This option is only meaningful when using + * exclusive locks. If not specified, a default value of 900 seconds will be + * used. Setting lockTimeout to 0 will make ArangoDB not time out + * waiting for a lock. + * @return options + */ + public StreamTransactionOptions lockTimeout(final Integer lockTimeout) { + this.lockTimeout = lockTimeout; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync an optional boolean flag that, if set, will force the transaction to write all data to disk + * before + * returning + * @return options + */ + public StreamTransactionOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + /** + * @param read contains the array of collection-names to be used in the transaction (mandatory) for read + * @return options + */ + public StreamTransactionOptions readCollections(final String... 
read) { + collections.read(read); + return this; + } + + /** + * @param write contains the array of collection-names to be used in the transaction (mandatory) for write + * @return options + */ + public StreamTransactionOptions writeCollections(final String... write) { + collections.write(write); + return this; + } + + /** + * @param exclusive contains the array of collection-names to be used in the transaction (mandatory) for + * exclusive write + * @return options + */ + public StreamTransactionOptions exclusiveCollections(final String... exclusive) { + collections.exclusive(exclusive); + return this; + } + + public Boolean getAllowImplicit() { + return allowImplicit; + } + + /** + * @param allowImplicit Allow reading from undeclared collections. + * @return options + */ + public StreamTransactionOptions allowImplicit(final Boolean allowImplicit) { + this.allowImplicit = allowImplicit; + return this; + } + + public Long getMaxTransactionSize() { + return maxTransactionSize; + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. + * @return options + */ + public StreamTransactionOptions maxTransactionSize(final Long maxTransactionSize) { + this.maxTransactionSize = maxTransactionSize; + return this; + } + + public Boolean getAllowDirtyRead() { + return allowDirtyRead; + } + + /** + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return options + * @see API + * Documentation + * @since ArangoDB 3.4.0 + */ + public StreamTransactionOptions allowDirtyRead(final Boolean allowDirtyRead) { + this.allowDirtyRead = allowDirtyRead; + return this; + } + + public Boolean getSkipFastLockRound() { + return skipFastLockRound; + } + + /** + * @param skipFastLockRound Whether to disable fast locking for write operations. 
Skipping the fast lock round can + * be faster overall if there are many concurrent Stream Transactions queued that all try + * to lock the same collection exclusively. It avoids deadlocking and retrying which can + * occur with the fast locking by guaranteeing a deterministic locking order at the expense + * of each actual locking operation taking longer. + * Fast locking should not be skipped for read-only Stream Transactions because it degrades + * performance if there are no concurrent transactions that use exclusive locks on the same + * collection. + * Default: {@code false} + * @return options + * @since ArangoDB 3.12.0 + */ + public StreamTransactionOptions skipFastLockRound(final Boolean skipFastLockRound) { + this.skipFastLockRound = skipFastLockRound; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/TransactionCollectionOptions.java b/core/src/main/java/com/arangodb/model/TransactionCollectionOptions.java new file mode 100644 index 000000000..ce1d4e466 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/TransactionCollectionOptions.java @@ -0,0 +1,73 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class TransactionCollectionOptions { + + private Collection read; + private Collection write; + private Collection exclusive; + private Boolean allowImplicit; + + public Collection getRead() { + return read; + } + + public TransactionCollectionOptions read(final String... read) { + this.read = Arrays.asList(read); + return this; + } + + public Collection getWrite() { + return write; + } + + public TransactionCollectionOptions write(final String... write) { + this.write = Arrays.asList(write); + return this; + } + + public Collection getExclusive() { + return exclusive; + } + + public TransactionCollectionOptions exclusive(final String... exclusive) { + this.exclusive = Arrays.asList(exclusive); + return this; + } + + public Boolean getAllowImplicit() { + return allowImplicit; + } + + public TransactionCollectionOptions allowImplicit(final Boolean allowImplicit) { + this.allowImplicit = allowImplicit; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/TransactionOptions.java b/core/src/main/java/com/arangodb/model/TransactionOptions.java new file mode 100644 index 000000000..a547f7732 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/TransactionOptions.java @@ -0,0 +1,166 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.internal.serde.UserData; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class TransactionOptions { + + private final TransactionCollectionOptions collections; + private String action; + private Object params; + private Integer lockTimeout; + private Boolean waitForSync; + private Long maxTransactionSize; + + public TransactionOptions() { + super(); + collections = new TransactionCollectionOptions(); + } + + public TransactionCollectionOptions getCollections() { + return collections; + } + + public String getAction() { + return action; + } + + /** + * @param action the actual transaction operations to be executed, in the form of stringified JavaScript code + * @return options + */ + TransactionOptions action(final String action) { + this.action = action; + return this; + } + + @UserData + public Object getParams() { + return params; + } + + /** + * @param params optional arguments passed to action + * @return options + */ + public TransactionOptions params(final Object params) { + this.params = params; + return this; + } + + public Integer getLockTimeout() { + return lockTimeout; + } + + /** + * @param lockTimeout a numeric value that can be used to set a timeout in seconds for + * waiting on collection locks. This option is only meaningful when using + * exclusive locks. If not specified, a default value of 900 seconds will be + * used. Setting lockTimeout to 0 will make ArangoDB not time out + * waiting for a lock. 
+ * @return options + */ + public TransactionOptions lockTimeout(final Integer lockTimeout) { + this.lockTimeout = lockTimeout; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync an optional boolean flag that, if set, will force the transaction to write all data to disk + * before + * returning + * @return options + */ + public TransactionOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + /** + * @param read contains the array of collection-names to be used in the transaction (mandatory) for read + * @return options + */ + public TransactionOptions readCollections(final String... read) { + collections.read(read); + return this; + } + + /** + * @param write contains the array of collection-names to be used in the transaction (mandatory) for write + * @return options + */ + public TransactionOptions writeCollections(final String... write) { + collections.write(write); + return this; + } + + /** + * @param exclusive contains the array of collection-names to be used in the transaction (mandatory) for + * exclusive write + * @return options + * @since ArangoDB 3.4.0 + */ + public TransactionOptions exclusiveCollections(final String... exclusive) { + collections.exclusive(exclusive); + return this; + } + + /** + * @param allowImplicit Collections that will be written to in the transaction must be declared with the write + * attribute or it + * will fail, whereas non-declared collections from which is solely read will be added + * lazily. The + * optional attribute allowImplicit can be set to false to let transactions fail in case of + * undeclared + * collections for reading. Collections for reading should be fully declared if possible, to + * avoid + * deadlocks. 
+ * @return options + */ + public TransactionOptions allowImplicit(final Boolean allowImplicit) { + collections.allowImplicit(allowImplicit); + return this; + } + + public Long getMaxTransactionSize() { + return maxTransactionSize; + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. + * @return options + * @since ArangoDB 3.2.0 + */ + public TransactionOptions maxTransactionSize(final Long maxTransactionSize) { + this.maxTransactionSize = maxTransactionSize; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/TransactionalOptions.java b/core/src/main/java/com/arangodb/model/TransactionalOptions.java new file mode 100644 index 000000000..c4a564c68 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/TransactionalOptions.java @@ -0,0 +1,25 @@ +package com.arangodb.model; + +import com.arangodb.arch.NoRawTypesInspection; + +@NoRawTypesInspection +public abstract class TransactionalOptions> { + + abstract T getThis(); + + private String streamTransactionId; + + public String getStreamTransactionId() { + return streamTransactionId; + } + + /** + * @param streamTransactionId If set, the operation will be executed within the transaction. + * @return options + */ + public T streamTransactionId(final String streamTransactionId) { + this.streamTransactionId = streamTransactionId; + return getThis(); + } + +} diff --git a/core/src/main/java/com/arangodb/model/TtlIndexOptions.java b/core/src/main/java/com/arangodb/model/TtlIndexOptions.java new file mode 100644 index 000000000..fa9368690 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/TtlIndexOptions.java @@ -0,0 +1,74 @@ +/* + * DISCLAIMER + * + * Copyright 2019 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; + +/** + * @author Heiko Kernbach + */ +public final class TtlIndexOptions extends IndexOptions { + + private final IndexType type = IndexType.ttl; + private Iterable fields; + private Integer expireAfter; + + public TtlIndexOptions() { + super(); + } + + @Override + TtlIndexOptions getThis() { + return this; + } + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute paths + * @return options + */ + TtlIndexOptions fields(final Iterable fields) { + this.fields = fields; + return this; + } + + public IndexType getType() { + return type; + } + + /** + * @param expireAfter The time (in seconds) after a document’s creation after which the documents count as + * β€œexpired”. 
+ * @return options + */ + public TtlIndexOptions expireAfter(final Integer expireAfter) { + this.expireAfter = expireAfter; + return this; + } + + public Integer getExpireAfter() { + return expireAfter; + } + +} diff --git a/src/main/java/com/arangodb/model/UserAccessOptions.java b/core/src/main/java/com/arangodb/model/UserAccessOptions.java similarity index 72% rename from src/main/java/com/arangodb/model/UserAccessOptions.java rename to core/src/main/java/com/arangodb/model/UserAccessOptions.java index 46c45fc16..9ac9ebfb3 100644 --- a/src/main/java/com/arangodb/model/UserAccessOptions.java +++ b/core/src/main/java/com/arangodb/model/UserAccessOptions.java @@ -1,46 +1,45 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.Permissions; - -/** - * @author Mark Vollmary - * - */ -public class UserAccessOptions { - - private Permissions grant; - - public UserAccessOptions() { - super(); - } - - protected Permissions getGrant() { - return grant; - } - - protected UserAccessOptions grant(final Permissions grant) { - this.grant = grant; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.Permissions; + +/** + * @author Mark Vollmary + */ +public final class UserAccessOptions { + + private Permissions grant; + + public UserAccessOptions() { + super(); + } + + public Permissions getGrant() { + return grant; + } + + UserAccessOptions grant(final Permissions grant) { + this.grant = grant; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/UserCreateOptions.java b/core/src/main/java/com/arangodb/model/UserCreateOptions.java new file mode 100644 index 000000000..7f7f93402 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/UserCreateOptions.java @@ -0,0 +1,92 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Map; + +/** + * @author Mark Vollmary + */ +public final class UserCreateOptions { + + private String user; + private String passwd; + private Boolean active; + private Map extra; + + public UserCreateOptions() { + super(); + } + + public String getUser() { + return user; + } + + /** + * @param user The name of the user + * @return options + */ + UserCreateOptions user(final String user) { + this.user = user; + return this; + } + + public String getPasswd() { + return passwd; + } + + /** + * @param passwd The user password + * @return options + */ + UserCreateOptions passwd(final String passwd) { + this.passwd = passwd; + return this; + } + + public Boolean getActive() { + return active; + } + + /** + * @param active An optional flag that specifies whether the user is active. If not specified, this will default to + * true + * @return options + */ + public UserCreateOptions active(final Boolean active) { + this.active = active; + return this; + } + + public Map getExtra() { + return extra; + } + + /** + * @param extra Optional data about the user + * @return options + */ + public UserCreateOptions extra(final Map extra) { + this.extra = extra; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/UserUpdateOptions.java b/core/src/main/java/com/arangodb/model/UserUpdateOptions.java new file mode 100644 index 000000000..4eb008ef0 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/UserUpdateOptions.java @@ -0,0 +1,78 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Map; + +/** + * @author Mark Vollmary + */ +public final class UserUpdateOptions { + + private String passwd; + private Boolean active; + private Map extra; + + public UserUpdateOptions() { + super(); + } + + public String getPasswd() { + return passwd; + } + + /** + * @param passwd The user password + * @return options + */ + public UserUpdateOptions passwd(final String passwd) { + this.passwd = passwd; + return this; + } + + public Boolean getActive() { + return active; + } + + /** + * @param active An optional flag that specifies whether the user is active. 
If not specified, this will default to + * true + * @return options + */ + public UserUpdateOptions active(final Boolean active) { + this.active = active; + return this; + } + + public Map getExtra() { + return extra; + } + + /** + * @param extra Optional data about the user + * @return options + */ + public UserUpdateOptions extra(final Map extra) { + this.extra = extra; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java b/core/src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java new file mode 100644 index 000000000..d7ab02a70 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexCollectionCreateOptions.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class VertexCollectionCreateOptions { + + private final Options options = new Options(); + private String collection; + + public VertexCollectionCreateOptions() { + super(); + } + + public String getCollection() { + return collection; + } + + /** + * @param collection The name of the collection + * @return options + */ + VertexCollectionCreateOptions collection(final String collection) { + this.collection = collection; + return this; + } + + public Options getOptions() { + return options; + } + + public Collection getSatellites() { + return options.satellites; + } + + /** + * @param satellites collection names that will be used to create SatelliteCollections + * for a Hybrid (Disjoint) SmartGraph (Enterprise Edition only). Each array element + * must be a valid collection name. The collection type cannot be modified later. + * @return options + * @since ArangoDB 3.9.0 + */ + public VertexCollectionCreateOptions satellites(final String... 
satellites) { + options.satellites = Arrays.asList(satellites); + return this; + } + + public static final class Options { + private Collection satellites; + + public Collection getSatellites() { + return satellites; + } + } + +} diff --git a/core/src/main/java/com/arangodb/model/VertexCollectionDropOptions.java b/core/src/main/java/com/arangodb/model/VertexCollectionDropOptions.java new file mode 100644 index 000000000..aa940d3bd --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexCollectionDropOptions.java @@ -0,0 +1,23 @@ +package com.arangodb.model; + +/** + * @deprecated use {@link VertexCollectionRemoveOptions} instead + */ +@Deprecated +public class VertexCollectionDropOptions { + private Boolean dropCollection; + + public Boolean getDropCollection() { + return dropCollection; + } + + /** + * @param dropCollection Drop the collection as well. Collection will only be dropped if it is not used in other + * graphs. + * @return this + */ + public VertexCollectionDropOptions dropCollection(Boolean dropCollection) { + this.dropCollection = dropCollection; + return this; + } +} diff --git a/core/src/main/java/com/arangodb/model/VertexCollectionRemoveOptions.java b/core/src/main/java/com/arangodb/model/VertexCollectionRemoveOptions.java new file mode 100644 index 000000000..897199fde --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexCollectionRemoveOptions.java @@ -0,0 +1,19 @@ +package com.arangodb.model; + +public class VertexCollectionRemoveOptions { + private Boolean dropCollection; + + public Boolean getDropCollection() { + return dropCollection; + } + + /** + * @param dropCollection Drop the collection as well. Collection will only be dropped if it is not used in other + * graphs. 
+ * @return this + */ + public VertexCollectionRemoveOptions dropCollection(Boolean dropCollection) { + this.dropCollection = dropCollection; + return this; + } +} diff --git a/src/main/java/com/arangodb/model/AqlQueryParseOptions.java b/core/src/main/java/com/arangodb/model/VertexCreateOptions.java similarity index 59% rename from src/main/java/com/arangodb/model/AqlQueryParseOptions.java rename to core/src/main/java/com/arangodb/model/VertexCreateOptions.java index 0b288286f..36cd2395b 100644 --- a/src/main/java/com/arangodb/model/AqlQueryParseOptions.java +++ b/core/src/main/java/com/arangodb/model/VertexCreateOptions.java @@ -1,50 +1,48 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class AqlQueryParseOptions { - - private String query; - - public AqlQueryParseOptions() { - super(); - } - - protected String getQuery() { - return query; - } - - /** - * @param query - * the query which you want parse - * @return options - */ - protected AqlQueryParseOptions query(final String query) { - this.query = query; - return this; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class VertexCreateOptions extends TransactionalOptions { + + private Boolean waitForSync; + + @Override + VertexCreateOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. 
+ * @return options + */ + public VertexCreateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/VertexDeleteOptions.java b/core/src/main/java/com/arangodb/model/VertexDeleteOptions.java new file mode 100644 index 000000000..82fba23aa --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexDeleteOptions.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class VertexDeleteOptions extends TransactionalOptions { + + private Boolean waitForSync; + private String ifMatch; + + @Override + VertexDeleteOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until deletion operation has been synced to disk. 
+ * @return options + */ + public VertexDeleteOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch remove a document based on a target revision + * @return options + */ + public VertexDeleteOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/VertexReplaceOptions.java b/core/src/main/java/com/arangodb/model/VertexReplaceOptions.java new file mode 100644 index 000000000..6d4dd12ea --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexReplaceOptions.java @@ -0,0 +1,62 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class VertexReplaceOptions extends TransactionalOptions { + + private Boolean waitForSync; + private String ifMatch; + + @Override + VertexReplaceOptions getThis() { + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. 
+ * @return options + */ + public VertexReplaceOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch replace a document based on target revision + * @return options + */ + public VertexReplaceOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/VertexUpdateOptions.java b/core/src/main/java/com/arangodb/model/VertexUpdateOptions.java new file mode 100644 index 000000000..f1d0e5bf7 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/VertexUpdateOptions.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class VertexUpdateOptions extends TransactionalOptions { + + private Boolean keepNull; + private Boolean waitForSync; + private String ifMatch; + + @Override + VertexUpdateOptions getThis() { + return this; + } + + public Boolean getKeepNull() { + return keepNull; + } + + /** + * @param keepNull If the intention is to delete existing attributes with the patch command, the URL query parameter + * keepNull can be used with a value of false. 
This will modify the behavior of the patch command to + * remove any attributes from the existing document that are contained in the patch document with an + * attribute value of null. + * @return options + */ + public VertexUpdateOptions keepNull(final Boolean keepNull) { + this.keepNull = keepNull; + return this; + } + + public Boolean getWaitForSync() { + return waitForSync; + } + + /** + * @param waitForSync Wait until document has been synced to disk. + * @return options + */ + public VertexUpdateOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; + return this; + } + + public String getIfMatch() { + return ifMatch; + } + + /** + * @param ifMatch replace a document based on target revision + * @return options + */ + public VertexUpdateOptions ifMatch(final String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/ViewCreateOptions.java b/core/src/main/java/com/arangodb/model/ViewCreateOptions.java new file mode 100644 index 000000000..e0a58273d --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ViewCreateOptions.java @@ -0,0 +1,55 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.ViewType; + +/** + * @author Mark Vollmary + */ +public final class ViewCreateOptions { + + private String name; + private ViewType type; + + public ViewCreateOptions() { + super(); + } + + ViewCreateOptions name(final String name) { + this.name = name; + return this; + } + + ViewCreateOptions type(final ViewType type) { + this.type = type; + return this; + } + + public String getName() { + return name; + } + + public ViewType getType() { + return type; + } + +} diff --git a/core/src/main/java/com/arangodb/model/ViewRenameOptions.java b/core/src/main/java/com/arangodb/model/ViewRenameOptions.java new file mode 100644 index 000000000..2f0ed5faf --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ViewRenameOptions.java @@ -0,0 +1,47 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +/** + * @author Mark Vollmary + */ +public final class ViewRenameOptions { + + private String name; + + public ViewRenameOptions() { + super(); + } + + public String getName() { + return name; + } + + /** + * @param name The new name + * @return options + */ + ViewRenameOptions name(final String name) { + this.name = name; + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/ZKDIndexOptions.java b/core/src/main/java/com/arangodb/model/ZKDIndexOptions.java new file mode 100644 index 000000000..cb428fe08 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ZKDIndexOptions.java @@ -0,0 +1,96 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.entity.IndexType; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.9 + * @deprecated since ArangoDB 3.12, use {@link MDIndexOptions} instead. 
+ */ +@Deprecated +public final class ZKDIndexOptions extends IndexOptions { + + final IndexType type = IndexType.zkd; + private Iterable fields; + private Boolean unique; + private FieldValueTypes fieldValueTypes; + + public ZKDIndexOptions() { + super(); + } + + @Override + ZKDIndexOptions getThis() { + return this; + } + + public Iterable getFields() { + return fields; + } + + /** + * @param fields A list of attribute paths + * @return options + */ + ZKDIndexOptions fields(final Iterable fields) { + this.fields = fields; + return this; + } + + public IndexType getType() { + return type; + } + + public Boolean getUnique() { + return unique; + } + + /** + * @param unique if true, then create a unique index + * @return options + */ + public ZKDIndexOptions unique(final Boolean unique) { + this.unique = unique; + return this; + } + + public FieldValueTypes getFieldValueTypes() { + return fieldValueTypes; + } + + /** + * @param fieldValueTypes must be {@link FieldValueTypes#DOUBLE}, currently only doubles are supported as values. + * @return options + */ + public ZKDIndexOptions fieldValueTypes(final FieldValueTypes fieldValueTypes) { + this.fieldValueTypes = fieldValueTypes; + return this; + } + + public enum FieldValueTypes { + @JsonProperty("double") + DOUBLE + } + +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/AnalyzerDeleteOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/AnalyzerDeleteOptions.java new file mode 100644 index 000000000..f4115efec --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/AnalyzerDeleteOptions.java @@ -0,0 +1,37 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +/** + * @author Michele Rastelli + */ +public final class AnalyzerDeleteOptions { + + private Boolean force; + + public Boolean getForce() { + return force; + } + + public void setForce(Boolean force) { + this.force = force; + } +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java new file mode 100644 index 000000000..1361f9c9d --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java @@ -0,0 +1,254 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +import com.arangodb.entity.ViewType; +import com.arangodb.entity.arangosearch.*; +import com.arangodb.internal.serde.InternalSerializers; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class ArangoSearchCreateOptions { + + private final ViewType type; + private String name; + private Long consolidationIntervalMsec; + private Long commitIntervalMsec; + private Long cleanupIntervalStep; + private ConsolidationPolicy consolidationPolicy; + private Collection links; + private Collection primarySorts; + private ArangoSearchCompression primarySortCompression; + private Collection storedValues; + private Collection optimizeTopK; + private Boolean primarySortCache; + private Boolean primaryKeyCache; + + public ArangoSearchCreateOptions() { + super(); + type = ViewType.ARANGO_SEARCH; + } + + ArangoSearchCreateOptions name(final String name) { + this.name = name; + return this; + } + + /** + * @param consolidationIntervalMsec Wait at least this many milliseconds between committing index data changes and + * making them visible to queries (default: 60000, to disable use: 0). For the case + * where there are a lot of inserts/updates, a lower value, until commit, will + * cause the index not to account for them and memory usage would continue to grow. + * For the case where there are a few inserts/updates, a higher value will impact + * performance and waste disk space for each commit call without any added + * benefits. 
+ * @return options + */ + public ArangoSearchCreateOptions consolidationIntervalMsec(final Long consolidationIntervalMsec) { + this.consolidationIntervalMsec = consolidationIntervalMsec; + return this; + } + + /** + * @param commitIntervalMsec Wait at least this many milliseconds between committing view data store changes and + * making documents visible to queries (default: 1000, to disable use: 0). For the case + * where there are a lot of inserts/updates, a lower value, until commit, will cause the + * index not to account for them and memory usage would continue to grow. For the case + * where there are a few inserts/updates, a higher value will impact performance and waste + * disk space for each commit call without any added benefits. Background: For data + * retrieval ArangoSearch views follow the concept of β€œeventually-consistent”, i.e. + * eventually all the data in ArangoDB will be matched by corresponding query expressions. + * The concept of ArangoSearch view β€œcommit” operation is introduced to control the + * upper-bound on the time until document addition/removals are actually reflected by + * corresponding query expressions. Once a β€œcommit” operation is complete all documents + * added/removed prior to the start of the β€œcommit” operation will be reflected by queries + * invoked in subsequent ArangoDB transactions, in-progress ArangoDB transactions will + * still continue to return a repeatable-read state. + * @return options + */ + public ArangoSearchCreateOptions commitIntervalMsec(final Long commitIntervalMsec) { + this.commitIntervalMsec = commitIntervalMsec; + return this; + } + + /** + * @param cleanupIntervalStep Wait at least this many commits between removing unused files in data directory + * (default: 10, to disable use: 0). For the case where the consolidation policies merge + * segments often (i.e. a lot of commit+consolidate), a lower value will cause a lot of + * disk space to be wasted. 
For the case where the consolidation policies rarely merge + * segments (i.e. few inserts/deletes), a higher value will impact performance without + * any added benefits. + * @return options + */ + public ArangoSearchCreateOptions cleanupIntervalStep(final Long cleanupIntervalStep) { + this.cleanupIntervalStep = cleanupIntervalStep; + return this; + } + + /** + * @param consolidationPolicy + * @return options + */ + public ArangoSearchCreateOptions consolidationPolicy(final ConsolidationPolicy consolidationPolicy) { + this.consolidationPolicy = consolidationPolicy; + return this; + } + + /** + * @param links A list of linked collections + * @return options + */ + public ArangoSearchCreateOptions link(final CollectionLink... links) { + this.links = Arrays.asList(links); + return this; + } + + /** + * @param primarySorts A list of linked collections + * @return options + */ + public ArangoSearchCreateOptions primarySort(final PrimarySort... primarySorts) { + this.primarySorts = Arrays.asList(primarySorts); + return this; + } + + /** + * @param primarySortCompression Defines how to compress the primary sort data + * @return options + */ + public ArangoSearchCreateOptions primarySortCompression(final ArangoSearchCompression primarySortCompression) { + this.primarySortCompression = primarySortCompression; + return this; + } + + /** + * @return options + */ + public ArangoSearchCreateOptions storedValues(final StoredValue... storedValues) { + this.storedValues = Arrays.asList(storedValues); + return this; + } + + /** + * @param optimizeTopK An array of strings defining sort expressions that you want to optimize. + * @return options + * @since ArangoDB 3.11, Enterprise Edition only + */ + public ArangoSearchCreateOptions optimizeTopK(final String... optimizeTopK) { + this.optimizeTopK = Arrays.asList(optimizeTopK); + return this; + } + + /** + * @param primarySortCache If you enable this option, then the primary sort columns are always cached in memory. 
+ * This can improve the performance of queries that utilize the primary sort order. + * Otherwise, these values are memory-mapped and it is up to the operating system to load + * them from disk into memory and to evict them from memory. + * @return options + * @since ArangoDB 3.9.6, Enterprise Edition only + */ + public ArangoSearchCreateOptions primarySortCache(final Boolean primarySortCache) { + this.primarySortCache = primarySortCache; + return this; + } + + /** + * @param primaryKeyCache If you enable this option, then the primary key columns are always cached in memory. This + * can improve the performance of queries that return many documents. Otherwise, these values + * are memory-mapped and it is up to the operating system to load them from disk into memory + * and to evict them from memory. + * @return options + * @since ArangoDB 3.9.6, Enterprise Edition only + */ + public ArangoSearchCreateOptions primaryKeyCache(final Boolean primaryKeyCache) { + this.primaryKeyCache = primaryKeyCache; + return this; + } + + public String getName() { + return name; + } + + public ViewType getType() { + return type; + } + + public Long getConsolidationIntervalMsec() { + return consolidationIntervalMsec; + } + + public Long getCommitIntervalMsec() { + return commitIntervalMsec; + } + + public Long getCleanupIntervalStep() { + return cleanupIntervalStep; + } + + public ConsolidationPolicy getConsolidationPolicy() { + return consolidationPolicy; + } + + @JsonSerialize(using = InternalSerializers.CollectionLinksSerializer.class) + public Collection getLinks() { + return links; + } + + /** + * @deprecated for removal, use {@link #getPrimarySort()} instead + */ + @Deprecated + @JsonIgnore + public Collection getPrimarySorts() { + return getPrimarySort(); + } + + public Collection getPrimarySort() { + return primarySorts; + } + + public ArangoSearchCompression getPrimarySortCompression() { + return primarySortCompression; + } + + public Collection getStoredValues() { + return 
storedValues; + } + + public Collection getOptimizeTopK() { + return optimizeTopK; + } + + public Boolean getPrimarySortCache() { + return primarySortCache; + } + + public Boolean getPrimaryKeyCache() { + return primaryKeyCache; + } + +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchOptionsBuilder.java b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchOptionsBuilder.java new file mode 100644 index 000000000..2bdf10827 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchOptionsBuilder.java @@ -0,0 +1,35 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +/** + * @author Mark Vollmary + */ +public abstract class ArangoSearchOptionsBuilder { + + private ArangoSearchOptionsBuilder() { + super(); + } + + public static ArangoSearchCreateOptions build(final ArangoSearchCreateOptions options, final String name) { + return options.name(name); + } +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchPropertiesOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchPropertiesOptions.java new file mode 100644 index 000000000..912540c7f --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchPropertiesOptions.java @@ -0,0 +1,125 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +import com.arangodb.entity.arangosearch.CollectionLink; +import com.arangodb.entity.arangosearch.ConsolidationPolicy; +import com.arangodb.entity.arangosearch.PrimarySort; +import com.arangodb.internal.serde.InternalSerializers; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; + +import java.util.Arrays; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +public final class ArangoSearchPropertiesOptions { + + private Long consolidationIntervalMsec; + private Long cleanupIntervalStep; + private ConsolidationPolicy consolidationPolicy; + private Collection links; + private Collection primarySorts; + + /** + * @param consolidationIntervalMsec Wait at least this many milliseconds between committing index data changes + * and making them visible to + * queries (default: 60000, to disable use: 0). For the case where there are a + * lot of inserts/updates, a + * lower value, until commit, will cause the index not to account for them and + * memory usage would + * continue to grow. For the case where there are a few inserts/updates, a + * higher value will impact + * performance and waste disk space for each commit call without any added + * benefits. + * @return options + */ + public ArangoSearchPropertiesOptions consolidationIntervalMsec(final Long consolidationIntervalMsec) { + this.consolidationIntervalMsec = consolidationIntervalMsec; + return this; + } + + /** + * @param cleanupIntervalStep Wait at least this many commits between removing unused files in data directory + * (default: 10, to + * disable use: 0). For the case where the consolidation policies merge segments often + * (i.e. a lot of + * commit+consolidate), a lower value will cause a lot of disk space to be wasted. For + * the case where the + * consolidation policies rarely merge segments (i.e. 
few inserts/deletes), a higher + * value will impact + * performance without any added benefits. + * @return options + */ + public ArangoSearchPropertiesOptions cleanupIntervalStep(final Long cleanupIntervalStep) { + this.cleanupIntervalStep = cleanupIntervalStep; + return this; + } + + /** + * @param consolidationPolicy + * @return options + */ + public ArangoSearchPropertiesOptions consolidationPolicy(final ConsolidationPolicy consolidationPolicy) { + this.consolidationPolicy = consolidationPolicy; + return this; + } + + /** + * @param links A list of linked collections + * @return options + */ + public ArangoSearchPropertiesOptions link(final CollectionLink... links) { + this.links = Arrays.asList(links); + return this; + } + + /** + * @param primarySorts A list of primarySort objects + * @return options + */ + public ArangoSearchPropertiesOptions primarySort(final PrimarySort... primarySorts) { + this.primarySorts = Arrays.asList(primarySorts); + return this; + } + + public Long getConsolidationIntervalMsec() { + return consolidationIntervalMsec; + } + + public Long getCleanupIntervalStep() { + return cleanupIntervalStep; + } + + public ConsolidationPolicy getConsolidationPolicy() { + return consolidationPolicy; + } + + @JsonSerialize(using = InternalSerializers.CollectionLinksSerializer.class) + public Collection getLinks() { + return links; + } + + public Collection getPrimarySorts() { + return primarySorts; + } +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasCreateOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasCreateOptions.java new file mode 100644 index 000000000..6456fe6ee --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasCreateOptions.java @@ -0,0 +1,71 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +import com.arangodb.entity.ViewType; +import com.arangodb.entity.arangosearch.*; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class SearchAliasCreateOptions { + + private String name; + private final ViewType type; + + private final Collection indexes = new ArrayList<>(); + + public SearchAliasCreateOptions() { + super(); + type = ViewType.SEARCH_ALIAS; + } + + SearchAliasCreateOptions name(final String name) { + this.name = name; + return this; + } + + /** + * @param indexes A list of inverted indexes to add to the View. + * @return options + */ + public SearchAliasCreateOptions indexes(final SearchAliasIndex... 
indexes) { + Collections.addAll(this.indexes, indexes); + return this; + } + + public String getName() { + return name; + } + + public ViewType getType() { + return type; + } + + public Collection getIndexes() { + return indexes; + } +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasOptionsBuilder.java b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasOptionsBuilder.java new file mode 100644 index 000000000..155e916be --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasOptionsBuilder.java @@ -0,0 +1,35 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +/** + * @author Michele Rastelli + */ +public abstract class SearchAliasOptionsBuilder { + + private SearchAliasOptionsBuilder() { + super(); + } + + public static SearchAliasCreateOptions build(final SearchAliasCreateOptions options, final String name) { + return options.name(name); + } +} diff --git a/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasPropertiesOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasPropertiesOptions.java new file mode 100644 index 000000000..7530b2cb3 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/arangosearch/SearchAliasPropertiesOptions.java @@ -0,0 +1,49 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model.arangosearch; + +import com.arangodb.entity.arangosearch.*; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; + +/** + * @author Michele Rastelli + * @since ArangoDB 3.10 + */ +public final class SearchAliasPropertiesOptions { + + private final Collection indexes = new ArrayList<>(); + + /** + * @param indexes A list of inverted indexes to add to the View. + * @return options + */ + public SearchAliasPropertiesOptions indexes(SearchAliasIndex... 
indexes) { + Collections.addAll(this.indexes, indexes); + return this; + } + + public Collection getIndexes() { + return indexes; + } +} diff --git a/core/src/main/java/com/arangodb/serde/ArangoSerde.java b/core/src/main/java/com/arangodb/serde/ArangoSerde.java new file mode 100644 index 000000000..d7a4ff8e7 --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/ArangoSerde.java @@ -0,0 +1,51 @@ +package com.arangodb.serde; + +import com.arangodb.ContentType; +import com.arangodb.RequestContext; + +import java.util.Objects; + +/** + * Contract for serialization/deserialization of user data. + * Implementations of this interface could be used for customizing serialization/deserialization of user related data + * using serialization/deserialization libraries like: + * - serialization libraries for specific JVM languages (e.g. Scala, Kotlin, ...) + * - serialization libraries already in use in frameworks (e.g. JSON-B, Micronaut Serialization, ...) + * - high performance serialization libraries (e.g. supporting compile-time data binding code generation) + * - low-level libraries without support to data binding + */ +public interface ArangoSerde { + + /** + * Serializes the object into the target data type. For data type {@link ContentType#JSON}, the serialized JSON string + * must be encoded into a byte array using the UTF-8 charset. + * + * @param value object to serialize + * @return serialized byte array + */ + byte[] serialize(Object value); + + /** + * Deserializes the content and binds it to the target data type. + * For data type {@link ContentType#JSON}, the byte array is the JSON string encoded using the UTF-8 charset. + * + * @param content byte array to deserialize + * @param clazz class of target data type + * @return deserialized object + */ + T deserialize(byte[] content, Class clazz); + + /** + * Deserializes the content and binds it to the target data type. 
+ * For data type {@link ContentType#JSON}, the byte array is the JSON string encoded using the UTF-8 charset. + * + * @param content byte array to deserialize + * @param clazz class of target data type + * @param ctx serde context, cannot be null + * @return deserialized object + */ + default T deserialize(byte[] content, Class clazz, RequestContext ctx) { + Objects.requireNonNull(ctx); + return deserialize(content, clazz); + } +} diff --git a/core/src/main/java/com/arangodb/serde/ArangoSerdeProvider.java b/core/src/main/java/com/arangodb/serde/ArangoSerdeProvider.java new file mode 100644 index 000000000..7fa1e048a --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/ArangoSerdeProvider.java @@ -0,0 +1,53 @@ +package com.arangodb.serde; + +import com.arangodb.ArangoDBException; +import com.arangodb.ContentType; +import com.arangodb.internal.serde.InternalSerdeProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +public interface ArangoSerdeProvider { + + static ArangoSerdeProvider of(ContentType contentType) { + Logger LOG = LoggerFactory.getLogger(ArangoSerdeProvider.class); + + ServiceLoader loader = ServiceLoader.load(ArangoSerdeProvider.class); + ArangoSerdeProvider serdeProvider = null; + Iterator iterator = loader.iterator(); + while (iterator.hasNext()) { + ArangoSerdeProvider p; + try { + p = iterator.next(); + } catch (ServiceConfigurationError e) { + LOG.warn("ServiceLoader failed to load ArangoSerdeProvider", e); + continue; + } + if (contentType.equals(p.getContentType())) { + if (serdeProvider != null) { + throw new ArangoDBException("Found multiple serde providers! Please set explicitly the one to use."); + } + serdeProvider = p; + } + } + if (serdeProvider == null) { + LOG.warn("No ArangoSerdeProvider found, using InternalSerdeProvider. 
Please consider registering a custom " + + "ArangoSerdeProvider to avoid depending on internal classes which are not part of the public API."); + serdeProvider = new InternalSerdeProvider(contentType); + } + return serdeProvider; + } + + /** + * @return a new serde instance + */ + ArangoSerde create(); + + /** + * @return the supported content type + */ + ContentType getContentType(); +} diff --git a/core/src/main/java/com/arangodb/serde/InternalFrom.java b/core/src/main/java/com/arangodb/serde/InternalFrom.java new file mode 100644 index 000000000..1963a5fa9 --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/InternalFrom.java @@ -0,0 +1,22 @@ +package com.arangodb.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + * Annotation for `_from` field used by the InternalSerde. It works with shaded driver and relocated Jackson. 
+ */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JacksonAnnotationsInside +@JsonProperty("_from") +@JsonInclude(JsonInclude.Include.NON_NULL) +public @interface InternalFrom { +} diff --git a/core/src/main/java/com/arangodb/serde/InternalId.java b/core/src/main/java/com/arangodb/serde/InternalId.java new file mode 100644 index 000000000..7ac590fbf --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/InternalId.java @@ -0,0 +1,22 @@ +package com.arangodb.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + * Annotation for `_id` field used by the InternalSerde. It works with shaded driver and relocated Jackson. 
+ */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JacksonAnnotationsInside +@JsonProperty("_id") +@JsonInclude(JsonInclude.Include.NON_NULL) +public @interface InternalId { +} diff --git a/core/src/main/java/com/arangodb/serde/InternalKey.java b/core/src/main/java/com/arangodb/serde/InternalKey.java new file mode 100644 index 000000000..932ecc942 --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/InternalKey.java @@ -0,0 +1,22 @@ +package com.arangodb.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + * Annotation for `_key` field used by the InternalSerde. It works with shaded driver and relocated Jackson. 
+ */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JacksonAnnotationsInside +@JsonProperty("_key") +@JsonInclude(JsonInclude.Include.NON_NULL) +public @interface InternalKey { +} diff --git a/core/src/main/java/com/arangodb/serde/InternalRev.java b/core/src/main/java/com/arangodb/serde/InternalRev.java new file mode 100644 index 000000000..c528d18e1 --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/InternalRev.java @@ -0,0 +1,22 @@ +package com.arangodb.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + * Annotation for `_rev` field used by the InternalSerde. It works with shaded driver and relocated Jackson. 
+ */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JacksonAnnotationsInside +@JsonProperty("_rev") +@JsonInclude(JsonInclude.Include.NON_NULL) +public @interface InternalRev { +} diff --git a/core/src/main/java/com/arangodb/serde/InternalTo.java b/core/src/main/java/com/arangodb/serde/InternalTo.java new file mode 100644 index 000000000..7505a337b --- /dev/null +++ b/core/src/main/java/com/arangodb/serde/InternalTo.java @@ -0,0 +1,22 @@ +package com.arangodb.serde; + +import com.fasterxml.jackson.annotation.JacksonAnnotationsInside; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + * Annotation for `_to` field used by the InternalSerde. It works with shaded driver and relocated Jackson. + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JacksonAnnotationsInside +@JsonProperty("_to") +@JsonInclude(JsonInclude.Include.NON_NULL) +public @interface InternalTo { +} diff --git a/core/src/main/java/com/arangodb/util/RawBytes.java b/core/src/main/java/com/arangodb/util/RawBytes.java new file mode 100644 index 000000000..9d4230a59 --- /dev/null +++ b/core/src/main/java/com/arangodb/util/RawBytes.java @@ -0,0 +1,49 @@ +package com.arangodb.util; + +import com.arangodb.internal.serde.InternalSerde; + +import java.util.Arrays; + +/** + * Helper class used to encapsulate raw value serialized as byte array. + * It can be used: + * - in serialization to append an already serialized raw value as is + * - in deserialization as target wrapper type for the raw value + *

+ * No validation is performed, the user is responsible for providing a valid byte array for the used content type. + *

+ * The raw value byte array can represent either: + * - a valid VPack + * - a valid JSON UTF-8 encoded string + *

+ * The driver's {@link InternalSerde} supports serializing and deserializing to and from + * {@code RawBytes}. + */ +public final class RawBytes implements RawData { + private final byte[] value; + + private RawBytes(final byte[] value) { + this.value = value; + } + + public static RawBytes of(final byte[] value) { + return new RawBytes(value); + } + + public byte[] get() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RawBytes rawBytes = (RawBytes) o; + return Arrays.equals(get(), rawBytes.get()); + } + + @Override + public int hashCode() { + return Arrays.hashCode(get()); + } +} diff --git a/core/src/main/java/com/arangodb/util/RawData.java b/core/src/main/java/com/arangodb/util/RawData.java new file mode 100644 index 000000000..73b5ffc66 --- /dev/null +++ b/core/src/main/java/com/arangodb/util/RawData.java @@ -0,0 +1,11 @@ +package com.arangodb.util; + +/** + * Marker interface for raw data, implementations are: + *

    + *
  • {@link RawBytes}
  • + *
  • {@link RawJson}
  • + *
+ */ +public interface RawData { +} diff --git a/core/src/main/java/com/arangodb/util/RawJson.java b/core/src/main/java/com/arangodb/util/RawJson.java new file mode 100644 index 000000000..a8f974506 --- /dev/null +++ b/core/src/main/java/com/arangodb/util/RawJson.java @@ -0,0 +1,43 @@ +package com.arangodb.util; + +import com.arangodb.internal.serde.InternalSerde; + +import java.util.Objects; + +/** + * Helper class used to encapsulate raw JSON string. + * It can be used: + * - in serialization to append a raw JSON node + * - in deserialization as target wrapper type for the raw JSON string + *

+ * The driver's {@link InternalSerde} supports serializing and deserializing to and from + * {@code RawJson}. + */ +public final class RawJson implements RawData { + private final String value; + + private RawJson(final String value) { + this.value = value; + } + + public static RawJson of(final String value) { + return new RawJson(value); + } + + public String get() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RawJson rawJson = (RawJson) o; + return Objects.equals(get(), rawJson.get()); + } + + @Override + public int hashCode() { + return Objects.hash(get()); + } +} diff --git a/src/main/java/com/arangodb/util/ArangoDeserializer.java b/core/src/main/java/com/arangodb/util/UnicodeUtils.java similarity index 52% rename from src/main/java/com/arangodb/util/ArangoDeserializer.java rename to core/src/main/java/com/arangodb/util/UnicodeUtils.java index 0ddfeba29..c6b63df91 100644 --- a/src/main/java/com/arangodb/util/ArangoDeserializer.java +++ b/core/src/main/java/com/arangodb/util/UnicodeUtils.java @@ -1,46 +1,52 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import java.lang.reflect.Type; - -import com.arangodb.ArangoDBException; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public interface ArangoDeserializer { - - /** - * Deserialze a given VelocPack to an instance of a given type - * - * @param vpack - * The VelocyPack to deserialize - * @param type - * The target type to deserialize to. Use String for raw Json. - * @return The deserialized VelocyPack - * @throws ArangoDBException - */ - T deserialize(final VPackSlice vpack, final Type type) throws ArangoDBException; - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.util; + +import java.text.Normalizer; +import java.util.Objects; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +public final class UnicodeUtils { + + private UnicodeUtils() { + } + + /** + * Normalizes a unicode string according to ArangoDB extended naming convention. 
+ * + * @param value string to normalize + * @return NFC normalized string + */ + public static String normalize(final String value) { + if (value == null) { + return null; + } + return Normalizer.normalize(value, Normalizer.Form.NFC); + } + + public static boolean isNormalized(final String value) { + Objects.requireNonNull(value); + return value.equals(normalize(value)); + } +} diff --git a/dev-README.md b/dev-README.md new file mode 100644 index 000000000..0eaaa61f0 --- /dev/null +++ b/dev-README.md @@ -0,0 +1,38 @@ +# dev-README + +## Start DB +Single: +``` +./docker/start_db.sh +``` +Cluster: +``` +STARTER_MODE=cluster ./docker/start_db.sh +``` + +## SonarCloud +Check results [here](https://sonarcloud.io/project/overview?id=arangodb_arangodb-java-driver). + +## check dependencies updates +```shell +mvn versions:display-dependency-updates +mvn versions:display-plugin-updates +``` + +## Code Analysis +Analyze (Spotbugs and JaCoCo): +``` +mvn -Dgpg.skip=true -Dmaven.javadoc.skip=true -am -pl test-functional verify +mvn -Dgpg.skip=true -Dmaven.javadoc.skip=true -Dmaven.test.skip verify +``` +Reports: +- [core](core/target/site/jacoco/index.html) +- [jackson-serde-json](jackson-serde-json/target/site/jacoco/index.html) +- [jackson-serde-vpack](jackson-serde-vpack/target/site/jacoco/index.html) +- [http-protocol](http-protocol/target/site/jacoco/index.html) +- [vst-protocol](vst-protocol/target/site/jacoco/index.html) + +## update native image reflection configuration +To generate reflection configuration run [NativeImageHelper](./driver/src/test/java/helper/NativeImageHelper.java) and +copy the generated json to +[reflect-config.json](./driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json). 
diff --git a/docker/find_active_endpoint.sh b/docker/find_active_endpoint.sh new file mode 100755 index 000000000..70937448e --- /dev/null +++ b/docker/find_active_endpoint.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +GW=172.28.0.1 +COORDINATORS=("$GW:8529" "$GW:8539" "$GW:8549") + +for a in ${COORDINATORS[*]} ; do + if curl -u root:test --silent --fail "http://$a"; then + echo "$a" + exit 0 + fi +done + +echo "Could not find any active endpoint!" +exit 1 diff --git a/docker/foo.bin b/docker/foo.bin new file mode 100644 index 000000000..97c4296cd Binary files /dev/null and b/docker/foo.bin differ diff --git a/docker/jwtHeader b/docker/jwtHeader new file mode 100644 index 000000000..153e1b8a1 --- /dev/null +++ b/docker/jwtHeader @@ -0,0 +1 @@ +Authorization: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJhcmFuZ29kYiIsInNlcnZlcl9pZCI6ImZvbyJ9.QmuhPHkmRPJuHGxsEqggHGRyVXikV44tb5YU_yWEvEM diff --git a/docker/jwtSecret b/docker/jwtSecret new file mode 100644 index 000000000..ea75728ba --- /dev/null +++ b/docker/jwtSecret @@ -0,0 +1 @@ +Averysecretword diff --git a/docker/server.pem b/docker/server.pem new file mode 100644 index 000000000..c97c30251 --- /dev/null +++ b/docker/server.pem @@ -0,0 +1,60 @@ +Bag Attributes + friendlyName: arangotest + localKeyID: 54 69 6D 65 20 31 36 30 34 32 35 36 36 37 39 38 35 34 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC1WiDnd4+uCmMG +539ZNZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMAL +X9euSseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7Nzmvdo +gNXoJQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiR +dJVPwUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf +2MGO64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzb +f8KnRY4PAgMBAAECggEAKi1d/bdW2TldMpvgiFTm15zLjHCpllbKBWFqRj3T9+X7 +Duo6Nh9ehopD0YDDe2DNhYr3DsH4sLjUWVDfDpAhutMsU1wlBzmOuC+EuRv/CeDB +4DFr+0sgCwlti+YAtwWcR05SF7A0Ai0GYW2lUipbtbFSBSjCfM08BlPDsPCRhdM8 
+DhBn3S45aP7oC8BdhG/etg+DfXW+/nyNwEcMCYG97bzXNjzYpCQjo/bTHdh2UPYM +4WEAqFzZ5jir8LVS3v7GqpqPmk6FnHJOJpfpOSZoPqnfpIw7SVlNsXHvDaHGcgYZ +Xec7rLQlBuv4RZU7OlGJpK2Ng5kvS9q3nfqqn7YIMQKBgQDqSsYnE+k6CnrSpa2W +B9W/+PChITgkA46XBUUjAueJ7yVZQQEOzl0VI6RoVBp3t66eO8uM9omO8/ogHXku +Ei9UUIIfH4BsSP7G5A06UC/FgReDxwBfbRuS+lupnmc348vPDkFlJZ4hDgWflNev +7tpUbljSAqUea1VhdBy146V4qwKBgQDGJ6iL1+A9uUM+1UklOAPpPhTQ8ZQDRCj7 +7IMVcbzWYvCMuVNXzOWuiz+VYr3IGCJZIbxbFDOHxGF4XKJnk0vm1qhQQME0PtAF +i1jIfsxpj8KKJl9Uad+XLQCYRV8mIZlhsd/ErRJuz6FyqevKH3nFIb0ggF3x2d06 +odTHuj4ILQKBgCUsI/BDSne4/e+59aaeK52/w33tJVkhb1gqr+N0LIRH+ycEF0Tg +HQijlQwwe9qOvBfC6PK+kuipcP/zbSyQGg5Ij7ycZOXJVxL7T9X2rv2pE7AGvNpn +Fz7klfJ9fWbyr310h4+ivkoETYQaO3ZgcSeAMntvi/8djHhf0cZSDgjtAoGBAKvQ +TUNcHjJGxfjgRLkB1dpSmwgEv7sJSaQOkiZw5TTauwq50nsJzYlHcg1cfYPW8Ulp +iAFNBdVNwNn1MFgwjpqMO4rCawObBxIXnhbSYvmQzjStSvFNj7JsMdzWIcdVUMI1 +0fmdu6LbY3ihvzIVkqcMNwnMZCjFKB6jnXTElu7NAoGAS0gNPD/bfzWAhZBBYp9/ +SLGOvjHKrSVWGwDiqdAGuh6xg+1C3F+XpiITP6d3Wv3PCJ/Gia5isQPSMaXG+xTt +6huBgFlksHqr0tsQA9dcgGW7BDr5VhRq5/WinaLhGGy1R+i2zbDmQXgHbCO+RH/s +bD9F4LZ3RoXmGHLW0IUggPw= +-----END PRIVATE KEY----- +Bag Attributes + friendlyName: arangotest + localKeyID: 54 69 6D 65 20 31 36 30 34 32 35 36 36 37 39 38 35 34 +subject=C = Unknown, ST = Unknown, L = Unknown, O = Unknown, OU = Unknown, CN = localhost + +issuer=C = Unknown, ST = Unknown, L = Unknown, O = Unknown, OU = Unknown, CN = localhost + +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIEeDCzXzANBgkqhkiG9w0BAQsFADBuMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhv +c3QwHhcNMjAxMTAxMTg1MTE5WhcNMzAxMDMwMTg1MTE5WjBuMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhv +c3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1WiDnd4+uCmMG539Z +NZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMALX9eu 
+SseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7NzmvdogNXo +JQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiRdJVP +wUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf2MGO +64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzbf8Kn +RY4PAgMBAAGjITAfMB0GA1UdDgQWBBTBrv9Awynt3C5IbaCNyOW5v4DNkTANBgkq +hkiG9w0BAQsFAAOCAQEAIm9rPvDkYpmzpSIhR3VXG9Y71gxRDrqkEeLsMoEyqGnw +/zx1bDCNeGg2PncLlW6zTIipEBooixIE9U7KxHgZxBy0Et6EEWvIUmnr6F4F+dbT +D050GHlcZ7eOeqYTPYeQC502G1Fo4tdNi4lDP9L9XZpf7Q1QimRH2qaLS03ZFZa2 +tY7ah/RQqZL8Dkxx8/zc25sgTHVpxoK853glBVBs/ENMiyGJWmAXQayewY3EPt/9 +wGwV4KmU3dPDleQeXSUGPUISeQxFjy+jCw21pYviWVJTNBA9l5ny3GhEmcnOT/gQ +HCvVRLyGLMbaMZ4JrPwb+aAtBgrgeiK4xeSMMvrbhw== +-----END CERTIFICATE----- diff --git a/docker/start_db.sh b/docker/start_db.sh new file mode 100755 index 000000000..e8c58ebcd --- /dev/null +++ b/docker/start_db.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +# Configuration environment variables: +# STARTER_MODE: (single|cluster|activefailover), default single +# DOCKER_IMAGE: ArangoDB docker image, default docker.io/arangodb/arangodb:latest +# STARTER_DOCKER_IMAGE: ArangoDB Starter docker image, default docker.io/arangodb/arangodb-starter:latest +# SSL: (true|false), default false +# ARANGO_LICENSE_KEY: only required for ArangoDB Enterprise + +# EXAMPLE: +# STARTER_MODE=cluster SSL=true ./start_db.sh + +STARTER_MODE=${STARTER_MODE:=single} +DOCKER_IMAGE=${DOCKER_IMAGE:=docker.io/arangodb/arangodb:latest} +STARTER_DOCKER_IMAGE=${STARTER_DOCKER_IMAGE:=docker.io/arangodb/arangodb-starter:latest} +SSL=${SSL:=false} +COMPRESSION=${COMPRESSION:=false} + +GW=172.28.0.1 +docker network create arangodb --subnet 172.28.0.0/16 + +# exit when any command fails +set -e + +docker pull $STARTER_DOCKER_IMAGE +docker pull $DOCKER_IMAGE + +LOCATION=$(pwd)/$(dirname "$0") +AUTHORIZATION_HEADER=$(cat "$LOCATION"/jwtHeader) + +STARTER_ARGS= +SCHEME=http +ARANGOSH_SCHEME=http+tcp +COORDINATORS=("$GW:8529" "$GW:8539" "$GW:8549") + +if [ 
"$STARTER_MODE" == "single" ]; then + COORDINATORS=("$GW:8529") +fi + +if [ "$SSL" == "true" ]; then + STARTER_ARGS="$STARTER_ARGS --ssl.keyfile=/data/server.pem" + SCHEME=https + ARANGOSH_SCHEME=http+ssl +fi + +if [ "$COMPRESSION" == "true" ]; then + STARTER_ARGS="${STARTER_ARGS} --all.http.compress-response-threshold=1" +fi + +# data volume +docker create -v /data --name arangodb-data alpine:3 /bin/true +docker cp "$LOCATION"/jwtSecret arangodb-data:/data +docker cp "$LOCATION"/server.pem arangodb-data:/data + +docker run -d \ + --name=adb \ + -p 8528:8528 \ + --volumes-from arangodb-data \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e ARANGO_LICENSE_KEY="$ARANGO_LICENSE_KEY" \ + $STARTER_DOCKER_IMAGE \ + $STARTER_ARGS \ + --docker.net-mode=default \ + --docker.container=adb \ + --auth.jwt-secret=/data/jwtSecret \ + --starter.address="${GW}" \ + --docker.image="${DOCKER_IMAGE}" \ + --starter.local --starter.mode=${STARTER_MODE} --all.log.level=debug --all.log.output=+ --log.verbose \ + --all.server.descriptors-minimum=1024 --all.javascript.allow-admin-execute=true --all.server.maximal-threads=128 + + +wait_server() { + # shellcheck disable=SC2091 + until $(curl --output /dev/null --insecure --fail --silent --head -i -H "$AUTHORIZATION_HEADER" "$SCHEME://$1/_api/version"); do + printf '.' + sleep 1 + done +} + +echo "Waiting..." + +for a in ${COORDINATORS[*]} ; do + wait_server "$a" +done + +set +e +for a in ${COORDINATORS[*]} ; do + echo "" + echo "Setting username and password..." + docker run --rm ${DOCKER_IMAGE} arangosh --server.endpoint="$ARANGOSH_SCHEME://$a" --server.authentication=false --javascript.execute-string='require("org/arangodb/users").update("root", "test")' +done +set -e + +for a in ${COORDINATORS[*]} ; do + echo "" + echo "Requesting endpoint version..." + curl -u root:test --insecure --fail "$SCHEME://$a/_api/version" +done + +echo "" +echo "" +echo "Copying test ML models into containers..." 
+for c in $(docker ps -a -f name=adb-.* -q) ; do + docker cp "$LOCATION"/foo.bin "$c":/tmp +done + +echo "" +echo "" +echo "Done, your deployment is reachable at: " +for a in ${COORDINATORS[*]} ; do + echo "$SCHEME://$a" + echo "" +done + +if [ "$STARTER_MODE" == "activefailover" ]; then + LEADER=$("$LOCATION"/find_active_endpoint.sh) + echo "Leader: $SCHEME://$LEADER" + echo "" +fi diff --git a/docker/start_proxy.sh b/docker/start_proxy.sh new file mode 100755 index 000000000..b4e938684 --- /dev/null +++ b/docker/start_proxy.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +docker run -d \ + -e LOG_LEVEL=Info \ + -e AUTH_USER=user \ + -e AUTH_PASSWORD=password \ + --network=arangodb -p 8888:8888 \ + docker.io/kalaksi/tinyproxy:1.7 diff --git a/driver/pom.xml b/driver/pom.xml new file mode 100644 index 000000000..290e83902 --- /dev/null +++ b/driver/pom.xml @@ -0,0 +1,64 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + arangodb-java-driver + arangodb-java-driver + ArangoDB Java Driver + + + com.arangodb.driver + src/test/**/* + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + true + + + + + + + + com.arangodb + core + compile + + + com.arangodb + http-protocol + compile + + + com.arangodb + jackson-serde-json + compile + + + com.google.code.findbugs + jsr305 + provided + + + org.reflections + reflections + 0.10.2 + test + + + + diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/native-image.properties b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/native-image.properties new file mode 100644 index 000000000..f60b51cea --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/native-image.properties @@ -0,0 +1,5 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config.json,${.}/resource-config-spi.json \ 
+-H:ReflectionConfigurationResources=${.}/reflect-config.json,${.}/reflect-config-serde.json,${.}/reflect-config-spi.json,${.}/reflect-config-mp-config.json \ +-H:SerializationConfigurationResources=${.}/serialization-config.json \ +-H:DynamicProxyConfigurationResources=${.}/proxy-config.json diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/proxy-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/proxy-config.json new file mode 100644 index 000000000..7453e1289 --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/proxy-config.json @@ -0,0 +1,26 @@ +[ + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$JsonFactory"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Builder"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Static"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Builder"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Static"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$Version"] + } +] diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-mp-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-mp-config.json new file mode 100644 index 000000000..84892c37d --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-mp-config.json @@ -0,0 +1,13 @@ +[ + { + "name": "com.arangodb.config.HostDescription", + "methods": [ + { + "name": "parse", + "parameterTypes": [ + 
"java.lang.CharSequence" + ] + } + ] + } +] \ No newline at end of file diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-serde.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-serde.json new file mode 100644 index 000000000..c50a5e113 --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-serde.json @@ -0,0 +1,146 @@ +[ + { + "name": "com.arangodb.internal.serde.JacksonUtils$JsonFactory", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$Version", + "queryAllDeclaredMethods": true + }, + { + "name": "com.fasterxml.jackson.core.JsonFactory", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "setStreamReadConstraints", + "parameterTypes": [ + "com.fasterxml.jackson.core.StreamReadConstraints" + ] + }, + { + "name": "setStreamWriteConstraints", + "parameterTypes": [ + "com.fasterxml.jackson.core.StreamWriteConstraints" + ] + }, + { + "name": "version", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamReadConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + 
"name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamReadConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxDocumentLength", + "parameterTypes": [ + "long" + ] + }, + { + "name": "maxNameLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNumberLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxStringLength", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamWriteConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamWriteConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.Version", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "getMajorVersion", + "parameterTypes": [] + }, + { + "name": "getMinorVersion", + "parameterTypes": [] + } + ] + } +] \ No newline at end of file diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-spi.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-spi.json new file mode 100644 index 000000000..cf082f1dd --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config-spi.json @@ -0,0 +1,11 @@ +[ + { + "name": "com.fasterxml.jackson.core.JsonFactory", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json 
b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json new file mode 100644 index 000000000..94919ac94 --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json @@ -0,0 +1,1652 @@ +[ + { + "name": "com.arangodb.internal.serde.InternalSerializers$FieldLinksSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalSerializers$CollectionLinksSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalSerializers$CollectionSchemaRuleSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$CollectionLinksDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$CollectionSchemaRuleDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$FieldLinksDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ViewEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.BaseDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": 
true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.BaseEdgeDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerRole", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$SatelliteReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties$BreakMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerFeature", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine$StorageEngineName", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex$OperationType", + "allDeclaredFields": true, + 
"allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryExecutionState", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity$CacheMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity$LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.StoreValuesType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LoadBalancingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field$Direction", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.IndexType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchCompression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.License", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.arangosearch.analyzer.SearchAnalyzerCase", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties$ReturnType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StreamType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2Format", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ViewType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties$GeoJSONAnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ShardingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.Permissions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionRevisionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryOptimizerRule", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.ClassificationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.IndexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlParseEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeDefinition", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.MultiDocumentEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.GraphEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlFunctionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DatabaseEntity", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.PrimarySort", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.EdgeNgram", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.TransactionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.UserEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ErrorEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.FieldLink", + "allDeclaredFields": true, + "allDeclaredMethods": 
true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlParseEntity$AstNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoAnalyzerOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ShardEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryOptimizerRule$Flags", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NearestNeighborsAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MultiDelimiterAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.PipelineAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$NumericReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.TextAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.EdgeDefinition$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBVersion", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionPlan", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NormAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionPlan", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NGramAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorWarning", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StemAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogEntriesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.CollectionLink", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.StoredValue", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentImportEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.CollationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryTrackingPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorEntity$Extras", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogEntriesEntity$Message", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexField", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionExpression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentCreateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.VertexUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentDeleteEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.VertexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeEntity", + "allDeclaredFields": 
true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MultiDelimiterAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.IdentityAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2Analyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.PipelineAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NearestNeighborsAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + 
{ + "name": "com.arangodb.entity.arangosearch.analyzer.ClassificationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StemAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NormAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.CollationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.TextAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NGramAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerRole", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$SatelliteReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties$BreakMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerFeature", + "allDeclaredFields": 
true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine$StorageEngineName", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex$OperationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryExecutionState", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity$CacheMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity$LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.StoreValuesType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LoadBalancingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field$Direction", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.IndexType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchCompression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.License", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SearchAnalyzerCase", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties$ReturnType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StreamType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2Format", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ViewType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties$GeoJSONAnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": 
true + }, + { + "name": "com.arangodb.entity.ShardingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.Permissions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionalOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.IndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AbstractMDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions$Optimizer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions$OnDuplicate", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIFieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.OverwriteMode", + "allDeclaredFields": 
true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ImportType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions$SortOrder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue$ComputeOn", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema$Level", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ZKDIndexOptions$FieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionCountOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentUpdateOptions", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphDocumentReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentExistsOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionTruncateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionDropOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogLevelOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DBCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.model.AqlFunctionDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.StreamTransactionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DatabaseUsersOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ViewRenameOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCollectionDropOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ViewCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionCreateOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryParseOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserAccessOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCollectionRemoveOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionsReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + 
{ + "name": "com.arangodb.model.OptionsBuilder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionCollectionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DatabaseOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionRemoveOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionRenameOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlFunctionGetOptions", + "allDeclaredFields": 
true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlFunctionCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ExplainAqlQueryOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.QueueTimeSample", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GeoIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TtlIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.FulltextIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ZKDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.InvertedIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.PersistentIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDPrefixedIndexOptions", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions$OnDuplicate", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIFieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.OverwriteMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ImportType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions$SortOrder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue$ComputeOn", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema$Level", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ZKDIndexOptions$FieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + } +] \ No newline at end of file diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config-spi.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config-spi.json new file mode 100644 index 000000000..7fb12eab0 --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.fasterxml.jackson.core.JsonFactory" + } + ] + }, + "bundles": [] +} diff --git 
a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config.json new file mode 100644 index 000000000..3bb430bb1 --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/resource-config.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/vertx/vertx-version.txt" + } + ] + }, + "bundles": [] +} diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json new file mode 100644 index 000000000..e5d77727d --- /dev/null +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json @@ -0,0 +1,26 @@ +[ + { + "name": "com.arangodb.entity.ErrorEntity" + }, + { + "name": "com.arangodb.ArangoDBException" + }, + { + "name": "com.arangodb.ArangoDBMultipleException" + }, + { + "name": "com.arangodb.internal.net.ArangoDBRedirectException" + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument" + }, + { + "name": "com.arangodb.entity.BaseDocument" + }, + { + "name": "com.arangodb.entity.BaseEdgeDocument" + }, + { + "name": "java.util.HashMap" + } +] diff --git a/driver/src/test/java/helper/NativeImageHelper.java b/driver/src/test/java/helper/NativeImageHelper.java new file mode 100644 index 000000000..670633aa8 --- /dev/null +++ b/driver/src/test/java/helper/NativeImageHelper.java @@ -0,0 +1,91 @@ +package helper; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import 
com.fasterxml.jackson.databind.node.ObjectNode; +import org.reflections.Reflections; +import org.reflections.scanners.SubTypesScanner; +import org.reflections.util.ClasspathHelper; +import org.reflections.util.ConfigurationBuilder; +import org.reflections.util.FilterBuilder; + +import java.net.URL; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; + +/** + * Helper scripts to generate GraalVM native image configuration + * + * @author Michele Rastelli + */ +public class NativeImageHelper { + public static void main(String[] args) throws JsonProcessingException { + generateReflectConfig(); + } + + private static void generateReflectConfig() throws JsonProcessingException { + System.out.println("---------------------------"); + System.out.println("--- reflect-config.json ---"); + System.out.println("---------------------------"); + + List packages = Arrays.asList( + "com.arangodb.entity", + "com.arangodb.model", + "com.arangodb.internal.cursor.entity" + ); + + ObjectMapper mapper = new ObjectMapper(); + ArrayNode rootNode = mapper.createArrayNode(); + + String internalSerdePackage = "com.arangodb.internal.serde"; + Collection serdeUrls = ClasspathHelper.forPackage(internalSerdePackage); + Reflections r = new Reflections(new ConfigurationBuilder() + .setScanners(new SubTypesScanner(false)) + .setUrls(serdeUrls) + .filterInputsBy(new FilterBuilder().includePackage(internalSerdePackage))); + Stream serializers = r.getSubTypesOf(JsonSerializer.class).stream() + .filter(it -> !it.isAnonymousClass()) + .map(Class::getName); + Stream deserializers = r.getSubTypesOf(JsonDeserializer.class).stream() + .filter(it -> !it.isAnonymousClass()) + .map(Class::getName); + Stream serdeClasses = Stream.concat(serializers, deserializers) + .filter(it -> it.contains("InternalSerializers") || it.contains("InternalDeserializers")); + + Stream entityClasses = packages.stream() + .flatMap(p -> { + final ConfigurationBuilder 
config = new ConfigurationBuilder() + .setScanners(new SubTypesScanner(false)) + .setUrls(ClasspathHelper.forPackage(p)) + .filterInputsBy(new FilterBuilder().includePackage(p)); + + Reflections reflections = new Reflections(config); + return Stream.concat( + reflections.getAllTypes().stream(), + reflections + .getSubTypesOf(Enum.class) + .stream() + .map(Class::getName) + ); + }); + Stream.concat(serdeClasses, entityClasses) + .filter(className -> className.startsWith("com.arangodb")) + .map(className -> { + ObjectNode entry = mapper.createObjectNode(); + entry.put("name", className); + entry.put("allDeclaredFields", true); + entry.put("allDeclaredMethods", true); + entry.put("allDeclaredConstructors", true); + return entry; + }) + .forEach(rootNode::add); + + String jsonString = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode); + System.out.println(jsonString); + } +} diff --git a/formatter.xml b/formatter.xml deleted file mode 100644 index a8fe8b3a4..000000000 --- a/formatter.xml +++ /dev/null @@ -1,313 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/http-protocol/pom.xml b/http-protocol/pom.xml new file mode 100644 index 000000000..006d9413b --- /dev/null +++ b/http-protocol/pom.xml @@ -0,0 +1,35 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + http-protocol + http-protocol + HTTP Protocol module for ArangoDB Java 
Driver + + + com.arangodb.http + + + + + com.arangodb + core + provided + + + io.vertx + vertx-web-client + compile + + + + \ No newline at end of file diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpCommunication.java b/http-protocol/src/main/java/com/arangodb/http/HttpCommunication.java new file mode 100644 index 000000000..1cbea2b2e --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/HttpCommunication.java @@ -0,0 +1,47 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.http; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Communication; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.HostHandler; + +import java.io.IOException; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +@UnstableApi +public class HttpCommunication extends Communication { + + HttpCommunication(final ArangoConfig config, final HostHandler hostHandler) { + super(config, hostHandler); + } + + @Override + protected void connect(@UnstableApi Connection conn) throws IOException { + // no-op + } + +} diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java b/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java new file mode 100644 index 000000000..f75c3639b --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java @@ -0,0 +1,331 @@ +/* + * DISCLAIMER + * + * Copyright 2017 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.http; + +import com.arangodb.*; +import com.arangodb.arch.UnstableApi; +import com.arangodb.config.HostDescription; +import com.arangodb.http.compression.Encoder; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.RequestType; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionPool; +import com.arangodb.internal.serde.ContentTypeFactory; +import com.arangodb.internal.util.EncodeUtils; +import io.netty.handler.ssl.ApplicationProtocolConfig; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.IdentityCipherSuiteFilter; +import io.netty.handler.ssl.JdkSslContext; +import io.vertx.core.MultiMap; +import io.vertx.core.Vertx; +import io.vertx.core.VertxOptions; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.http.HttpHeaders; +import io.vertx.core.http.HttpMethod; +import io.vertx.core.http.HttpVersion; +import io.vertx.core.net.JdkSSLEngineOptions; +import io.vertx.core.spi.tls.SslContextFactory; +import io.vertx.ext.auth.authentication.TokenCredentials; +import io.vertx.ext.auth.authentication.UsernamePasswordCredentials; +import io.vertx.ext.web.client.HttpRequest; +import io.vertx.ext.web.client.HttpResponse; +import io.vertx.ext.web.client.WebClient; +import io.vertx.ext.web.client.WebClientOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLContext; +import java.util.Collections; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.arangodb.internal.net.ConnectionPoolImpl.HTTP1_SLOTS_PIPELINING; +import static 
com.arangodb.internal.net.ConnectionPoolImpl.HTTP2_SLOTS; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +@UnstableApi +public class HttpConnection implements Connection { + private static final Logger LOGGER = LoggerFactory.getLogger(HttpConnection.class); + private static final String CONTENT_TYPE_APPLICATION_JSON_UTF8 = "application/json; charset=utf-8"; + private static final String CONTENT_TYPE_VPACK = "application/x-velocypack"; + private static final String USER_AGENT = getUserAgent(); + private static final AtomicInteger THREAD_COUNT = new AtomicInteger(); + private volatile String auth; + private final int compressionThreshold; + private final Encoder encoder; + private final WebClient client; + private final Integer timeout; + private final MultiMap commonHeaders = MultiMap.caseInsensitiveMultiMap(); + private final Vertx vertx; + private final Vertx vertxToClose; + private final ConnectionPool pool; + + private static String getUserAgent() { + return "JavaDriver/" + PackageVersion.VERSION + " (JVM/" + System.getProperty("java.specification.version") + ")"; + } + + HttpConnection(final ArangoConfig config, final HttpProtocolConfig protocolConfig, final HostDescription host, final ConnectionPool pool) { + this.pool = pool; + Protocol protocol = config.getProtocol(); + ContentType contentType = ContentTypeFactory.of(protocol); + if (contentType == ContentType.VPACK) { + commonHeaders.add(HttpHeaders.ACCEPT.toString(), CONTENT_TYPE_VPACK); + commonHeaders.add(HttpHeaders.CONTENT_TYPE.toString(), CONTENT_TYPE_VPACK); + } else if (contentType == ContentType.JSON) { + commonHeaders.add(HttpHeaders.ACCEPT.toString(), CONTENT_TYPE_APPLICATION_JSON_UTF8); + commonHeaders.add(HttpHeaders.CONTENT_TYPE.toString(), CONTENT_TYPE_APPLICATION_JSON_UTF8); + } else { + throw new IllegalArgumentException("Unsupported protocol: " + protocol); + } + compressionThreshold = config.getCompressionThreshold(); + Compression compression = 
config.getCompression(); + encoder = Encoder.of(compression, config.getCompressionLevel()); + if (encoder.getFormat() != null) { + commonHeaders.add(HttpHeaders.ACCEPT_ENCODING.toString(), encoder.getFormat()); + } + commonHeaders.add("x-arango-driver", USER_AGENT); + timeout = config.getTimeout(); + auth = new UsernamePasswordCredentials( + config.getUser(), Optional.ofNullable(config.getPassword()).orElse("") + ).toHttpAuthorization(); + + if (protocolConfig.getVertx() != null) { + // reuse existing Vert.x + vertx = protocolConfig.getVertx(); + // Vert.x will not be closed when connection is closed + vertxToClose = null; + LOGGER.debug("Reusing existing Vert.x instance"); + } else { + // create a new Vert.x instance + LOGGER.debug("Creating new Vert.x instance"); + vertx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true).setEventLoopPoolSize(1)); + vertx.runOnContext(e -> Thread.currentThread().setName("adb-http-" + THREAD_COUNT.getAndIncrement())); + // Vert.x be closed when connection is closed + vertxToClose = vertx; + } + + int intTtl = Optional.ofNullable(config.getConnectionTtl()) + .map(ttl -> Math.toIntExact(ttl / 1000)) + .orElse(0); + + HttpVersion httpVersion = protocol == Protocol.HTTP_JSON || protocol == Protocol.HTTP_VPACK ? 
+ HttpVersion.HTTP_1_1 : HttpVersion.HTTP_2; + + WebClientOptions webClientOptions = new WebClientOptions() + .setMaxPoolSize(1) + .setHttp2MaxPoolSize(1) + .setConnectTimeout(timeout) + .setIdleTimeoutUnit(TimeUnit.MILLISECONDS) + .setIdleTimeout(timeout) + .setKeepAliveTimeout(intTtl) + .setHttp2KeepAliveTimeout(intTtl) + .setUserAgentEnabled(false) + .setFollowRedirects(false) + .setLogActivity(true) + .setKeepAlive(true) + .setTcpKeepAlive(true) + .setPipelining(config.getPipelining()) + .setPipeliningLimit(HTTP1_SLOTS_PIPELINING) + .setHttp2MultiplexingLimit(HTTP2_SLOTS) + .setReuseAddress(true) + .setReusePort(true) + .setHttp2ClearTextUpgrade(false) + .setProtocolVersion(httpVersion) + .setDefaultHost(host.getHost()) + .setDefaultPort(host.getPort()) + .setProxyOptions(protocolConfig.getProxyOptions()); + + if (compression != Compression.NONE) { + webClientOptions.setTryUseCompression(true); + } + + if (Boolean.TRUE.equals(config.getUseSsl())) { + SSLContext ctx = config.getSslContext(); + webClientOptions + .setSsl(true) + .setUseAlpn(true) + .setAlpnVersions(Collections.singletonList(httpVersion)) + .setVerifyHost(config.getVerifyHost()) + .setJdkSslEngineOptions(new JdkSSLEngineOptions() { + @Override + public JdkSSLEngineOptions copy() { + return this; + } + + @Override + public SslContextFactory sslContextFactory() { + return () -> new JdkSslContext( + ctx, + true, + null, + IdentityCipherSuiteFilter.INSTANCE, + new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.FATAL_ALERT, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.FATAL_ALERT, + httpVersion.alpnName() + ), + ClientAuth.OPTIONAL, + null, + false + ); + } + }); + } + + client = WebClient.create(vertx, webClientOptions); + } + + private static String buildUrl(final InternalRequest request) { + StringBuilder sb = new StringBuilder(); + String dbName = request.getDbName(); + if (dbName != null && !dbName.isEmpty()) 
{ + sb.append("/_db/").append(EncodeUtils.encodeURIComponent(dbName)); + } + sb.append(request.getPath()); + if (!request.getQueryParam().isEmpty()) { + sb.append("?"); + for (Iterator> iterator = request.getQueryParam().entrySet().iterator(); iterator.hasNext(); ) { + Entry param = iterator.next(); + if (param.getValue() != null) { + sb.append(EncodeUtils.encodeURIComponent(param.getKey())); + sb.append("="); + sb.append(EncodeUtils.encodeURIComponent(param.getValue())); + if (iterator.hasNext()) { + sb.append("&"); + } + } + } + } + return sb.toString(); + } + + private static void addHeader(final InternalRequest request, final HttpRequest httpRequest) { + for (final Entry header : request.getHeaderParam().entrySet()) { + httpRequest.putHeader(header.getKey(), header.getValue()); + } + } + + @Override + public void close() { + client.close(); + if (vertxToClose != null) { + LOGGER.debug("Closing Vert.x instance"); + vertxToClose.close(); + } + } + + private HttpMethod requestTypeToHttpMethod(RequestType requestType) { + switch (requestType) { + case POST: + return HttpMethod.POST; + case PUT: + return HttpMethod.PUT; + case PATCH: + return HttpMethod.PATCH; + case DELETE: + return HttpMethod.DELETE; + case HEAD: + return HttpMethod.HEAD; + case GET: + default: + return HttpMethod.GET; + } + } + + @Override + public void release() { + vertx.runOnContext(__ -> pool.release(this)); + } + + @Override + @UnstableApi + public CompletableFuture executeAsync(@UnstableApi final InternalRequest request) { + CompletableFuture rfuture = new CompletableFuture<>(); + doExecute(request, rfuture); + return rfuture; + } + + private void doExecute(@UnstableApi final InternalRequest request, @UnstableApi final CompletableFuture rfuture) { + String path = buildUrl(request); + HttpRequest httpRequest = client + .request(requestTypeToHttpMethod(request.getRequestType()), path) + .timeout(timeout); + + httpRequest.putHeaders(commonHeaders); + addHeader(request, httpRequest); + 
httpRequest.putHeader(HttpHeaders.AUTHORIZATION.toString(), auth); + + byte[] reqBody = request.getBody(); + Buffer buffer; + if (reqBody == null) { + buffer = Buffer.buffer(); + } else if (reqBody.length > compressionThreshold) { + httpRequest.putHeader(HttpHeaders.CONTENT_ENCODING.toString(), encoder.getFormat()); + buffer = encoder.encode(reqBody); + } else { + buffer = Buffer.buffer(reqBody); + } + + try { + httpRequest.sendBuffer(buffer) + .map(this::buildResponse) + .onSuccess(rfuture::complete) + .onFailure(rfuture::completeExceptionally); + } catch (Exception e) { + rfuture.completeExceptionally(e); + } + } + + private InternalResponse buildResponse(final HttpResponse httpResponse) { + final InternalResponse response = new InternalResponse(); + response.setResponseCode(httpResponse.statusCode()); + Buffer body = httpResponse.body(); + if (body != null) { + byte[] bytes = body.getBytes(); + if (bytes.length > 0) { + response.setBody(bytes); + } + } + for (Entry header : httpResponse.headers()) { + response.putMeta(header.getKey(), header.getValue()); + } + return response; + } + + @Override + public void setJwt(String jwt) { + if (jwt != null) { + auth = new TokenCredentials(jwt).toHttpAuthorization(); + } + } + +} diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java b/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java new file mode 100644 index 000000000..72c8c9086 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java @@ -0,0 +1,58 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.http; + +import com.arangodb.PackageVersion; +import com.arangodb.arch.UnstableApi; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.ConnectionPool; +import io.vertx.core.Vertx; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@UnstableApi +public class HttpConnectionFactory implements ConnectionFactory { + private final Logger LOGGER = LoggerFactory.getLogger(HttpConnectionFactory.class); + + final HttpProtocolConfig protocolConfig; + + public HttpConnectionFactory(@UnstableApi final HttpProtocolConfig cfg) { + protocolConfig = cfg != null ? 
cfg : HttpProtocolConfig.builder().build(); + if (protocolConfig.getVertx() == null && !PackageVersion.SHADED && Vertx.currentContext() != null) { + LOGGER.warn("Found an existing Vert.x instance, you can reuse it by setting:\n" + + "new ArangoDB.Builder()\n" + + " // ...\n" + + " .protocolConfig(HttpProtocolConfig.builder().vertx(Vertx.currentContext().owner()).build())\n" + + " .build();\n"); + } + } + + @Override + @UnstableApi + public Connection create(@UnstableApi final ArangoConfig config, + final HostDescription host, + @UnstableApi final ConnectionPool pool) { + return new HttpConnection(config, protocolConfig, host, pool); + } +} diff --git a/src/main/java/com/arangodb/internal/http/HttpProtocol.java b/http-protocol/src/main/java/com/arangodb/http/HttpProtocol.java similarity index 50% rename from src/main/java/com/arangodb/internal/http/HttpProtocol.java rename to http-protocol/src/main/java/com/arangodb/http/HttpProtocol.java index 4c5e54bd9..601e2a33f 100644 --- a/src/main/java/com/arangodb/internal/http/HttpProtocol.java +++ b/http-protocol/src/main/java/com/arangodb/http/HttpProtocol.java @@ -1,62 +1,61 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.http; - -import java.io.IOException; - -import org.apache.http.client.ClientProtocolException; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.net.CommunicationProtocol; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class HttpProtocol implements CommunicationProtocol { - - private final HttpCommunication httpCommunitaction; - - public HttpProtocol(final HttpCommunication httpCommunitaction) { - super(); - this.httpCommunitaction = httpCommunitaction; - } - - @Override - public Response execute(final Request request, final HostHandle hostHandle) throws ArangoDBException { - try { - return httpCommunitaction.execute(request, hostHandle); - } catch (final ClientProtocolException e) { - throw new ArangoDBException(e); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - - @Override - public void close() throws IOException { - httpCommunitaction.disconnect(); - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.http; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.HostHandle; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +/** + * @author Mark Vollmary + */ +@UnstableApi +public class HttpProtocol implements CommunicationProtocol { + + private final HttpCommunication httpCommunication; + + public HttpProtocol(final HttpCommunication httpCommunication) { + super(); + this.httpCommunication = httpCommunication; + } + + @Override + @UnstableApi + public CompletableFuture executeAsync(@UnstableApi final InternalRequest request, @UnstableApi final HostHandle hostHandle) { + return httpCommunication.executeAsync(request, hostHandle); + } + + @Override + public void setJwt(String jwt) { + // no-op: jwt is updated in the host handlers + } + + @Override + public void close() throws IOException { + httpCommunication.close(); + } + +} diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java new file mode 100644 index 000000000..7a62dc505 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java @@ -0,0 +1,59 @@ +package com.arangodb.http; + +import com.arangodb.config.ProtocolConfig; +import io.vertx.core.Vertx; +import io.vertx.core.net.ProxyOptions; + +public final class HttpProtocolConfig implements ProtocolConfig { + private final Vertx vertx; + private final ProxyOptions proxyOptions; + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private Vertx vertx; + private ProxyOptions proxyOptions; + + private Builder() { + } + + /** + * Set the Vert.x instance to use for creating HTTP connections. 
+ * + * @param vertx the Vert.x instance to use + * @return this builder + */ + public Builder vertx(Vertx vertx) { + this.vertx = vertx; + return this; + } + + /** + * @param proxyOptions proxy options for HTTP connections + * @return this builder + */ + public Builder proxyOptions(ProxyOptions proxyOptions) { + this.proxyOptions = proxyOptions; + return this; + } + + public HttpProtocolConfig build() { + return new HttpProtocolConfig(vertx, proxyOptions); + } + } + + private HttpProtocolConfig(Vertx vertx, ProxyOptions proxyOptions) { + this.vertx = vertx; + this.proxyOptions = proxyOptions; + } + + public Vertx getVertx() { + return vertx; + } + + public ProxyOptions getProxyOptions() { + return proxyOptions; + } +} diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpProtocolProvider.java b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolProvider.java new file mode 100644 index 000000000..a85abe9d8 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolProvider.java @@ -0,0 +1,41 @@ +package com.arangodb.http; + +import com.arangodb.Protocol; +import com.arangodb.arch.UnstableApi; +import com.arangodb.config.ProtocolConfig; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.HostHandler; +import com.arangodb.internal.net.ProtocolProvider; +import com.fasterxml.jackson.databind.Module; + +@UnstableApi +public class HttpProtocolProvider implements ProtocolProvider { + + @Override + public boolean supportsProtocol(Protocol protocol) { + return Protocol.HTTP_VPACK.equals(protocol) || + Protocol.HTTP_JSON.equals(protocol) || + Protocol.HTTP2_VPACK.equals(protocol) || + Protocol.HTTP2_JSON.equals(protocol); + } + + @Override + @UnstableApi + public ConnectionFactory createConnectionFactory(@UnstableApi ProtocolConfig config) { + return new HttpConnectionFactory((HttpProtocolConfig) 
config); + } + + @Override + @UnstableApi + public CommunicationProtocol createProtocol(@UnstableApi ArangoConfig config, @UnstableApi HostHandler hostHandler) { + return new HttpProtocol(new HttpCommunication(config, hostHandler)); + } + + @Override + public Module protocolModule() { + return null; + } + +} diff --git a/http-protocol/src/main/java/com/arangodb/http/compression/Encoder.java b/http-protocol/src/main/java/com/arangodb/http/compression/Encoder.java new file mode 100644 index 000000000..840999265 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/compression/Encoder.java @@ -0,0 +1,28 @@ +package com.arangodb.http.compression; + +import com.arangodb.Compression; +import io.netty.handler.codec.compression.ZlibWrapper; +import io.vertx.core.buffer.Buffer; + +public interface Encoder { + Buffer encode(byte[] data); + + String getFormat(); + + static Encoder of(Compression compression, int level) { + if (level < 0 || level > 9) { + throw new IllegalArgumentException("compression level: " + level + " (expected: 0-9)"); + } + + switch (compression) { + case GZIP: + return new ZlibEncoder(ZlibWrapper.GZIP, level, "gzip"); + case DEFLATE: + return new ZlibEncoder(ZlibWrapper.ZLIB, level, "deflate"); + case NONE: + return new NoopEncoder(); + default: + throw new IllegalArgumentException("Unsupported compression: " + compression); + } + } +} diff --git a/http-protocol/src/main/java/com/arangodb/http/compression/JdkZlibEncoder.java b/http-protocol/src/main/java/com/arangodb/http/compression/JdkZlibEncoder.java new file mode 100644 index 000000000..1eb332915 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/compression/JdkZlibEncoder.java @@ -0,0 +1,191 @@ +/* + * Copyright 2012 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.arangodb.http.compression; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.handler.codec.compression.CompressionException; +import io.netty.handler.codec.compression.ZlibWrapper; +import io.netty.util.internal.*; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.util.zip.CRC32; +import java.util.zip.Deflater; + +/** + * Compresses a {@link ByteBuf} using the deflate algorithm. + */ +class JdkZlibEncoder { + + private static final InternalLogger logger = InternalLoggerFactory.getInstance(JdkZlibEncoder.class); + + /** + * Maximum initial size for temporary heap buffers used for the compressed output. Buffer may still grow beyond + * this if necessary. + */ + private static final int MAX_INITIAL_OUTPUT_BUFFER_SIZE; + /** + * Max size for temporary heap buffers used to copy input data to heap. 
+ */ + private static final int MAX_INPUT_BUFFER_SIZE; + + private final ZlibWrapper wrapper; + private final Deflater deflater; + + /* + * GZIP support + */ + private final CRC32 crc = new CRC32(); + private static final byte[] gzipHeader = {0x1f, (byte) 0x8b, Deflater.DEFLATED, 0, 0, 0, 0, 0, 0, 0}; + + static { + MAX_INITIAL_OUTPUT_BUFFER_SIZE = SystemPropertyUtil.getInt( + "io.netty.jdkzlib.encoder.maxInitialOutputBufferSize", + 65536); + MAX_INPUT_BUFFER_SIZE = SystemPropertyUtil.getInt( + "io.netty.jdkzlib.encoder.maxInputBufferSize", + 65536); + + if (logger.isDebugEnabled()) { + logger.debug("-Dio.netty.jdkzlib.encoder.maxInitialOutputBufferSize={}", MAX_INITIAL_OUTPUT_BUFFER_SIZE); + logger.debug("-Dio.netty.jdkzlib.encoder.maxInputBufferSize={}", MAX_INPUT_BUFFER_SIZE); + } + } + + private static ByteBuf allocateByteBuf(int len) { + return ByteBufAllocator.DEFAULT.heapBuffer(len); + } + + private static ByteBuf allocateByteBuf() { + return ByteBufAllocator.DEFAULT.heapBuffer(); + } + + private static ByteBuf emptyBuf() { + return ByteBufAllocator.DEFAULT.heapBuffer(0, 0); + } + + /** + * Creates a new zlib encoder with the specified {@code compressionLevel} + * and the specified wrapper. + * + * @param compressionLevel {@code 1} yields the fastest compression and {@code 9} yields the + * best compression. {@code 0} means no compression. The default + * compression level is {@code 6}. 
+ * @throws CompressionException if failed to initialize zlib + */ + JdkZlibEncoder(ZlibWrapper wrapper, int compressionLevel) { + ObjectUtil.checkInRange(compressionLevel, 0, 9, "compressionLevel"); + ObjectUtil.checkNotNull(wrapper, "wrapper"); + + if (wrapper == ZlibWrapper.ZLIB_OR_NONE) { + throw new IllegalArgumentException( + "wrapper '" + ZlibWrapper.ZLIB_OR_NONE + "' is not " + + "allowed for compression."); + } + + this.wrapper = wrapper; + deflater = new Deflater(compressionLevel, wrapper != ZlibWrapper.ZLIB); + } + + ByteBuf encode(byte[] in) { + if (in.length == 0) { + return emptyBuf(); + } + ByteBuf out = allocateBuffer(in.length); + encodeSome(in, out); + finishEncode(out); + return out; + } + + private void encodeSome(byte[] in, ByteBuf out) { + if (wrapper == ZlibWrapper.GZIP) { + out.writeBytes(gzipHeader); + } + if (wrapper == ZlibWrapper.GZIP) { + crc.update(in, 0, in.length); + } + + deflater.setInput(in); + for (; ; ) { + deflate(out); + if (!out.isWritable()) { + out.ensureWritable(out.writerIndex()); + } else if (deflater.needsInput()) { + break; + } + } + } + + private ByteBuf allocateBuffer(int length) { + int sizeEstimate = (int) Math.ceil(length * 1.001) + 12; + switch (wrapper) { + case GZIP: + sizeEstimate += gzipHeader.length; + break; + case ZLIB: + sizeEstimate += 2; // first two magic bytes + break; + default: + throw new IllegalArgumentException(); + } + // sizeEstimate might overflow if close to 2G + if (sizeEstimate < 0 || sizeEstimate > MAX_INITIAL_OUTPUT_BUFFER_SIZE) { + // can always expand later + return allocateByteBuf(MAX_INITIAL_OUTPUT_BUFFER_SIZE); + } + return allocateByteBuf(sizeEstimate); + } + + private void finishEncode(ByteBuf out) { + ByteBuf footer = allocateByteBuf(); + deflater.finish(); + while (!deflater.finished()) { + deflate(footer); + } + if (wrapper == ZlibWrapper.GZIP) { + int crcValue = (int) crc.getValue(); + int uncBytes = deflater.getTotalIn(); + footer.writeByte(crcValue); + 
footer.writeByte(crcValue >>> 8); + footer.writeByte(crcValue >>> 16); + footer.writeByte(crcValue >>> 24); + footer.writeByte(uncBytes); + footer.writeByte(uncBytes >>> 8); + footer.writeByte(uncBytes >>> 16); + footer.writeByte(uncBytes >>> 24); + } + out.writeBytes(footer); + deflater.reset(); + crc.reset(); + } + + private void deflate(ByteBuf out) { + int numBytes; + do { + int writerIndex = out.writerIndex(); + numBytes = deflater.deflate( + out.array(), out.arrayOffset() + writerIndex, out.writableBytes(), Deflater.SYNC_FLUSH); + out.writerIndex(writerIndex + numBytes); + } while (numBytes > 0); + } + + void close() { + deflater.reset(); + deflater.end(); + } +} diff --git a/http-protocol/src/main/java/com/arangodb/http/compression/NoopEncoder.java b/http-protocol/src/main/java/com/arangodb/http/compression/NoopEncoder.java new file mode 100644 index 000000000..e02750166 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/compression/NoopEncoder.java @@ -0,0 +1,15 @@ +package com.arangodb.http.compression; + +import io.vertx.core.buffer.Buffer; + +class NoopEncoder implements Encoder { + @Override + public Buffer encode(byte[] data) { + return Buffer.buffer(data); + } + + @Override + public String getFormat() { + return null; + } +} diff --git a/http-protocol/src/main/java/com/arangodb/http/compression/ZlibEncoder.java b/http-protocol/src/main/java/com/arangodb/http/compression/ZlibEncoder.java new file mode 100644 index 000000000..f8ad91014 --- /dev/null +++ b/http-protocol/src/main/java/com/arangodb/http/compression/ZlibEncoder.java @@ -0,0 +1,31 @@ +package com.arangodb.http.compression; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.compression.ZlibWrapper; +import io.vertx.core.buffer.Buffer; + +class ZlibEncoder implements Encoder { + private final ZlibWrapper wrapper; + private final int level; + private final String format; + + ZlibEncoder(ZlibWrapper wrapper, int level, String format) { + this.wrapper = wrapper; + 
this.level = level; + this.format = format; + } + + @Override + public Buffer encode(byte[] data) { + JdkZlibEncoder encoder = new JdkZlibEncoder(wrapper, level); + ByteBuf bb = encoder.encode(data); + Buffer out = Buffer.buffer(bb); + encoder.close(); + return out; + } + + @Override + public String getFormat() { + return format; + } +} diff --git a/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/native-image.properties b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/native-image.properties new file mode 100644 index 000000000..6323e7ae3 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/native-image.properties @@ -0,0 +1,3 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config-spi.json \ +-H:ReflectionConfigurationResources=${.}/reflect-config-spi.json,${.}/reflect-config-serde.json diff --git a/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-serde.json b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-serde.json new file mode 100644 index 000000000..c50a5e113 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-serde.json @@ -0,0 +1,146 @@ +[ + { + "name": "com.arangodb.internal.serde.JacksonUtils$JsonFactory", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": 
"com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$Version", + "queryAllDeclaredMethods": true + }, + { + "name": "com.fasterxml.jackson.core.JsonFactory", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "setStreamReadConstraints", + "parameterTypes": [ + "com.fasterxml.jackson.core.StreamReadConstraints" + ] + }, + { + "name": "setStreamWriteConstraints", + "parameterTypes": [ + "com.fasterxml.jackson.core.StreamWriteConstraints" + ] + }, + { + "name": "version", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamReadConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamReadConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxDocumentLength", + "parameterTypes": [ + "long" + ] + }, + { + "name": "maxNameLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNumberLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxStringLength", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamWriteConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.StreamWriteConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.fasterxml.jackson.core.Version", + "queryAllPublicMethods": true, + 
"methods": [ + { + "name": "getMajorVersion", + "parameterTypes": [] + }, + { + "name": "getMinorVersion", + "parameterTypes": [] + } + ] + } +] \ No newline at end of file diff --git a/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-spi.json b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-spi.json new file mode 100644 index 000000000..be5c8f170 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/reflect-config-spi.json @@ -0,0 +1,11 @@ +[ + { + "name": "com.arangodb.http.HttpProtocolProvider", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/resource-config-spi.json b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/resource-config-spi.json new file mode 100644 index 000000000..9037d85e5 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/native-image/com.arangodb/http-protocol/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.arangodb.internal.net.ProtocolProvider" + } + ] + }, + "bundles": [] +} diff --git a/http-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider b/http-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider new file mode 100644 index 000000000..f48d178b7 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider @@ -0,0 +1 @@ +com.arangodb.http.HttpProtocolProvider diff --git a/http-protocol/src/main/resources/META-INF/vertx/vertx-version.txt b/http-protocol/src/main/resources/META-INF/vertx/vertx-version.txt new file mode 100644 index 000000000..a6695ff98 --- /dev/null +++ b/http-protocol/src/main/resources/META-INF/vertx/vertx-version.txt @@ -0,0 
+1 @@ +4.3.4 \ No newline at end of file diff --git a/jackson-serde-json/pom.xml b/jackson-serde-json/pom.xml new file mode 100644 index 000000000..703fd8714 --- /dev/null +++ b/jackson-serde-json/pom.xml @@ -0,0 +1,45 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + jackson-serde-json + jackson-serde-json + Jackson Serde JSON module for ArangoDB Java Driver + + + com.arangodb.serde.jackson.json + + + + + com.arangodb + core + provided + + + com.fasterxml.jackson.core + jackson-databind + compile + + + com.fasterxml.jackson.core + jackson-core + compile + + + com.fasterxml.jackson.core + jackson-annotations + compile + + + + \ No newline at end of file diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/From.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/From.java new file mode 100644 index 000000000..8a5b12021 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/From.java @@ -0,0 +1,14 @@ +package com.arangodb.serde.jackson; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface From { +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Id.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Id.java new file mode 100644 index 000000000..da57af859 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Id.java @@ -0,0 +1,14 @@ +package com.arangodb.serde.jackson; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, 
ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface Id { +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonMapperProvider.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonMapperProvider.java new file mode 100644 index 000000000..39c6065a0 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonMapperProvider.java @@ -0,0 +1,52 @@ +package com.arangodb.serde.jackson; + +import com.arangodb.ArangoDBException; +import com.arangodb.ContentType; +import com.arangodb.internal.serde.JacksonUtils; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +/** + * Not shaded in arangodb-java-driver-shaded. + */ +public class JacksonMapperProvider { + private static final Logger LOG = LoggerFactory.getLogger(JacksonMapperProvider.class); + + public static ObjectMapper of(final ContentType contentType) { + String formatName; + if (contentType == ContentType.JSON) { + formatName = "JSON"; + } else if (contentType == ContentType.VPACK) { + formatName = "Velocypack"; + } else { + throw new IllegalArgumentException("Unexpected value: " + contentType); + } + + ServiceLoader sl = ServiceLoader.load(JsonFactory.class); + Iterator iterator = sl.iterator(); + while (iterator.hasNext()) { + JsonFactory jf; + try { + jf = iterator.next(); + } catch (ServiceConfigurationError e) { + LOG.warn("ServiceLoader failed to load JsonFactory", e); + continue; + } + if (formatName.equals(jf.getFormatName())) { + if (contentType == ContentType.JSON) { + JacksonUtils.tryConfigureJsonFactory(jf); + } + return new ObjectMapper(jf); + } + LOG.debug("Required format ({}) not supported by JsonFactory: {}", formatName, jf.getClass().getName()); + } + + throw new 
ArangoDBException("No JsonFactory found for content type: " + contentType); + } +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonSerde.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonSerde.java new file mode 100644 index 000000000..8a749121e --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/JacksonSerde.java @@ -0,0 +1,56 @@ +package com.arangodb.serde.jackson; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.RequestContext; +import com.arangodb.serde.jackson.internal.JacksonSerdeImpl; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.util.function.Consumer; + +import static com.arangodb.serde.jackson.internal.JacksonSerdeImpl.SERDE_CONTEXT_ATTRIBUTE_NAME; + +/** + * User data serde based on Jackson Databind. Not shaded in arangodb-java-driver-shaded. + */ +public interface JacksonSerde extends ArangoSerde { + + /** + * Creates a new JacksonSerde with default settings for the specified data type. + * + * @param contentType serialization target data type + * @return the created JacksonSerde + */ + static JacksonSerde of(final ContentType contentType) { + return create(JacksonMapperProvider.of(contentType)); + } + + /** + * Creates a new JacksonSerde using the provided ObjectMapper. + * + * @param mapper Jackson ObjectMapper to use + * @return the created JacksonSerde + */ + static JacksonSerde create(final ObjectMapper mapper) { + return new JacksonSerdeImpl(mapper); + } + + /** + * Extracts the {@link RequestContext} from the current {@link DeserializationContext}. 
+ * + * @param ctx current Jackson {@link DeserializationContext} + * @return current {@link RequestContext} + */ + static RequestContext getRequestContext(DeserializationContext ctx) { + return (RequestContext) ctx.getAttribute(SERDE_CONTEXT_ATTRIBUTE_NAME); + } + + /** + * Allows configuring the underlying Jackson ObjectMapper + * + * @param configureFunction function to configure the Jackson ObjectMapper + */ + JacksonSerde configure(final Consumer configureFunction); + +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Key.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Key.java new file mode 100644 index 000000000..a066db02b --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Key.java @@ -0,0 +1,14 @@ +package com.arangodb.serde.jackson; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface Key { +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Rev.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Rev.java new file mode 100644 index 000000000..71dcac153 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/Rev.java @@ -0,0 +1,14 @@ +package com.arangodb.serde.jackson; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface Rev { +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/To.java 
b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/To.java new file mode 100644 index 000000000..8886a1ef4 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/To.java @@ -0,0 +1,14 @@ +package com.arangodb.serde.jackson; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface To { +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/ArangoSerdeAnnotationIntrospector.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/ArangoSerdeAnnotationIntrospector.java new file mode 100644 index 000000000..d91fb31f6 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/ArangoSerdeAnnotationIntrospector.java @@ -0,0 +1,60 @@ +package com.arangodb.serde.jackson.internal; + +import com.arangodb.serde.jackson.*; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.PropertyName; +import com.fasterxml.jackson.databind.introspect.Annotated; +import com.fasterxml.jackson.databind.introspect.JacksonAnnotationIntrospector; + +import java.lang.annotation.Annotation; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +class ArangoSerdeAnnotationIntrospector extends JacksonAnnotationIntrospector { + private static final JsonInclude JSON_INCLUDE_NON_NULL = JsonIncludeNonNull.class.getAnnotation(JsonInclude.class); + private static final Map, String> MAPPINGS; + private static final Class[] ANNOTATIONS; + + static { + MAPPINGS = new HashMap<>(); + MAPPINGS.put(Id.class, "_id"); + MAPPINGS.put(Key.class, "_key"); + MAPPINGS.put(Rev.class, "_rev"); + MAPPINGS.put(From.class, "_from"); + MAPPINGS.put(To.class, "_to"); + 
ANNOTATIONS = MAPPINGS.keySet().toArray(new Class[0]); + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + private static class JsonIncludeNonNull { + } + + @Override + public PropertyName findNameForSerialization(Annotated a) { + return Optional.ofNullable(findMapping(a)).orElseGet(() -> super.findNameForSerialization(a)); + } + + @Override + public PropertyName findNameForDeserialization(Annotated a) { + return Optional.ofNullable(findMapping(a)).orElseGet(() -> super.findNameForDeserialization(a)); + } + + private PropertyName findMapping(Annotated a) { + for (Map.Entry, String> e : MAPPINGS.entrySet()) { + if (_hasAnnotation(a, e.getKey())) { + return PropertyName.construct(e.getValue()); + } + } + return null; + } + + @Override + public JsonInclude.Value findPropertyInclusion(Annotated a) { + if (_hasOneOf(a, ANNOTATIONS)) { + return new JsonInclude.Value(JSON_INCLUDE_NON_NULL); + } else { + return super.findPropertyInclusion(a); + } + } +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/JacksonSerdeImpl.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/JacksonSerdeImpl.java new file mode 100644 index 000000000..f7c5b2a5a --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/internal/JacksonSerdeImpl.java @@ -0,0 +1,64 @@ +package com.arangodb.serde.jackson.internal; + +import com.arangodb.RequestContext; +import com.arangodb.serde.jackson.JacksonSerde; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.cfg.ContextAttributes; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Consumer; + + +/** + * Not shaded in arangodb-java-driver-shaded. 
+ */ +public final class JacksonSerdeImpl implements JacksonSerde { + public static final String SERDE_CONTEXT_ATTRIBUTE_NAME = "arangoRequestContext"; + + private final ObjectMapper mapper; + + public JacksonSerdeImpl(final ObjectMapper mapper) { + this.mapper = mapper; + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + mapper.setAnnotationIntrospector(new ArangoSerdeAnnotationIntrospector()); + } + + @Override + public byte[] serialize(final Object value) { + try { + return mapper.writeValueAsBytes(value); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Override + public T deserialize(final byte[] content, final Class type) { + return deserialize(content, type, RequestContext.EMPTY); + } + + @Override + public T deserialize(byte[] content, Class type, RequestContext ctx) { + Objects.requireNonNull(ctx); + if (content == null || content.length == 0) { + return null; + } + try { + return mapper.readerFor(mapper.constructType(type)) + .with(ContextAttributes.getEmpty().withPerCallAttribute(SERDE_CONTEXT_ATTRIBUTE_NAME, ctx)) + .readValue(content); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public JacksonSerde configure(Consumer configureFunction) { + configureFunction.accept(mapper); + return this; + } + +} diff --git a/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/json/JacksonJsonSerdeProvider.java b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/json/JacksonJsonSerdeProvider.java new file mode 100644 index 000000000..90dfdf5c1 --- /dev/null +++ b/jackson-serde-json/src/main/java/com/arangodb/serde/jackson/json/JacksonJsonSerdeProvider.java @@ -0,0 +1,18 @@ +package com.arangodb.serde.jackson.json; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.ArangoSerdeProvider; +import com.arangodb.serde.jackson.JacksonSerde; + +public class JacksonJsonSerdeProvider implements 
ArangoSerdeProvider { + @Override + public ArangoSerde create() { + return JacksonSerde.of(ContentType.JSON); + } + + @Override + public ContentType getContentType() { + return ContentType.JSON; + } +} diff --git a/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/native-image.properties b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/native-image.properties new file mode 100644 index 000000000..f6d4bf39a --- /dev/null +++ b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/native-image.properties @@ -0,0 +1,3 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config-spi.json \ +-H:ReflectionConfigurationResources=${.}/reflect-config-spi.json diff --git a/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/reflect-config-spi.json b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/reflect-config-spi.json new file mode 100644 index 000000000..932e86cf3 --- /dev/null +++ b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/reflect-config-spi.json @@ -0,0 +1,11 @@ +[ + { + "name": "com.arangodb.serde.jackson.json.JacksonJsonSerdeProvider", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/resource-config-spi.json b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/resource-config-spi.json new file mode 100644 index 000000000..520213dcf --- /dev/null +++ b/jackson-serde-json/src/main/resources/META-INF/native-image/com.arangodb/jackson-serde-json/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.arangodb.serde.ArangoSerdeProvider" + } + ] + }, + "bundles": [] +} diff --git 
a/jackson-serde-json/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider b/jackson-serde-json/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider new file mode 100644 index 000000000..1d8eb8bc7 --- /dev/null +++ b/jackson-serde-json/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider @@ -0,0 +1 @@ +com.arangodb.serde.jackson.json.JacksonJsonSerdeProvider diff --git a/jackson-serde-vpack/pom.xml b/jackson-serde-vpack/pom.xml new file mode 100644 index 000000000..c6bc761f5 --- /dev/null +++ b/jackson-serde-vpack/pom.xml @@ -0,0 +1,45 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + jackson-serde-vpack + jackson-serde-vpack + Jackson Serde VPACK module for ArangoDB Java Driver + + + com.arangodb.serde.jackson.vpack + + + + + com.arangodb + core + provided + + + com.arangodb + jackson-serde-json + compile + + + com.arangodb + jackson-dataformat-velocypack + compile + + + com.arangodb + velocypack + compile + + + + diff --git a/jackson-serde-vpack/src/main/java/com/arangodb/serde/jackson/vpack/JacksonVPackSerdeProvider.java b/jackson-serde-vpack/src/main/java/com/arangodb/serde/jackson/vpack/JacksonVPackSerdeProvider.java new file mode 100644 index 000000000..6e52ec179 --- /dev/null +++ b/jackson-serde-vpack/src/main/java/com/arangodb/serde/jackson/vpack/JacksonVPackSerdeProvider.java @@ -0,0 +1,18 @@ +package com.arangodb.serde.jackson.vpack; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.ArangoSerdeProvider; +import com.arangodb.serde.jackson.JacksonSerde; + +public class JacksonVPackSerdeProvider implements ArangoSerdeProvider { + @Override + public ArangoSerde create() { + return JacksonSerde.of(ContentType.VPACK); + } + + @Override + public ContentType getContentType() { + return ContentType.VPACK; + } +} diff --git 
a/jackson-serde-vpack/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider b/jackson-serde-vpack/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider new file mode 100644 index 000000000..977adc7a9 --- /dev/null +++ b/jackson-serde-vpack/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider @@ -0,0 +1 @@ +com.arangodb.serde.jackson.vpack.JacksonVPackSerdeProvider diff --git a/jsonb-serde/pom.xml b/jsonb-serde/pom.xml new file mode 100644 index 000000000..1cfa40826 --- /dev/null +++ b/jsonb-serde/pom.xml @@ -0,0 +1,36 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + jsonb-serde + jsonb-serde + JsonB Serde module for ArangoDB Java Driver + + + com.arangodb.serde.jsonb + + + + + com.arangodb + core + provided + + + jakarta.json.bind + jakarta.json.bind-api + 3.0.0 + compile + + + + \ No newline at end of file diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/From.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/From.java new file mode 100644 index 000000000..057be2105 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/From.java @@ -0,0 +1,19 @@ +package com.arangodb.serde.jsonb; + +import jakarta.json.bind.annotation.JsonbAnnotation; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JsonbAnnotation +@JsonbProperty("_from") +public @interface From { +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Id.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Id.java new file mode 100644 index 000000000..f2c4f8462 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Id.java 
@@ -0,0 +1,19 @@ +package com.arangodb.serde.jsonb; + +import jakarta.json.bind.annotation.JsonbAnnotation; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JsonbAnnotation +@JsonbProperty("_id") +public @interface Id { +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerde.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerde.java new file mode 100644 index 000000000..b1d2baa36 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerde.java @@ -0,0 +1,35 @@ +package com.arangodb.serde.jsonb; + +import com.arangodb.serde.ArangoSerde; +import jakarta.json.bind.Jsonb; +import jakarta.json.bind.JsonbBuilder; +import jakarta.json.bind.JsonbConfig; + +import java.nio.charset.StandardCharsets; + +/** + * User data serde based on Jakarta JSON Binding (JSON-B). 
+ */ +public class JsonbSerde implements ArangoSerde { + + private final Jsonb jsonb; + + public JsonbSerde() { + jsonb = JsonbBuilder.create(); + } + + public JsonbSerde(final JsonbConfig config) { + jsonb = JsonbBuilder.create(config); + } + + @Override + public byte[] serialize(Object value) { + return jsonb.toJson(value).getBytes(StandardCharsets.UTF_8); + } + + @Override + public T deserialize(byte[] content, Class type) { + return jsonb.fromJson(new String(content, StandardCharsets.UTF_8), type); + } + +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerdeProvider.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerdeProvider.java new file mode 100644 index 000000000..152d8f3fd --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/JsonbSerdeProvider.java @@ -0,0 +1,34 @@ +package com.arangodb.serde.jsonb; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerdeProvider; +import jakarta.json.bind.JsonbConfig; + +public class JsonbSerdeProvider implements ArangoSerdeProvider { + + /** + * Creates a new JsonbSerde with default settings. + * + * @return the created JsonbSerde + */ + @Override + public JsonbSerde create() { + return new JsonbSerde(); + } + + /** + * Creates a new JsonbSerde using the provided configuration. 
+ * + * @param config JsonbConfig to use + * @return the created JsonbSerde + */ + static JsonbSerde create(final JsonbConfig config) { + return new JsonbSerde(config); + } + + @Override + public ContentType getContentType() { + return ContentType.JSON; + } + +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Key.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Key.java new file mode 100644 index 000000000..e5ff89558 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Key.java @@ -0,0 +1,19 @@ +package com.arangodb.serde.jsonb; + +import jakarta.json.bind.annotation.JsonbAnnotation; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JsonbAnnotation +@JsonbProperty("_key") +public @interface Key { +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Rev.java b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Rev.java new file mode 100644 index 000000000..86a7a5ee1 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/Rev.java @@ -0,0 +1,19 @@ +package com.arangodb.serde.jsonb; + +import jakarta.json.bind.annotation.JsonbAnnotation; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JsonbAnnotation +@JsonbProperty("_rev") +public @interface Rev { +} diff --git a/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/To.java 
b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/To.java new file mode 100644 index 000000000..89b4ef7d2 --- /dev/null +++ b/jsonb-serde/src/main/java/com/arangodb/serde/jsonb/To.java @@ -0,0 +1,19 @@ +package com.arangodb.serde.jsonb; + +import jakarta.json.bind.annotation.JsonbAnnotation; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author Michele Rastelli + */ +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@JsonbAnnotation +@JsonbProperty("_to") +public @interface To { +} diff --git a/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/native-image.properties b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/native-image.properties new file mode 100644 index 000000000..f6d4bf39a --- /dev/null +++ b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/native-image.properties @@ -0,0 +1,3 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config-spi.json \ +-H:ReflectionConfigurationResources=${.}/reflect-config-spi.json diff --git a/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/reflect-config-spi.json b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/reflect-config-spi.json new file mode 100644 index 000000000..5a63b3c30 --- /dev/null +++ b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/reflect-config-spi.json @@ -0,0 +1,11 @@ +[ + { + "name": "com.arangodb.serde.jsonb.JsonbSerdeProvider", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/resource-config-spi.json 
b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/resource-config-spi.json new file mode 100644 index 000000000..520213dcf --- /dev/null +++ b/jsonb-serde/src/main/resources/META-INF/native-image/com.arangodb/json-serde/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.arangodb.serde.ArangoSerdeProvider" + } + ] + }, + "bundles": [] +} diff --git a/jsonb-serde/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider b/jsonb-serde/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider new file mode 100644 index 000000000..7a37a64b1 --- /dev/null +++ b/jsonb-serde/src/main/resources/META-INF/services/com.arangodb.serde.ArangoSerdeProvider @@ -0,0 +1 @@ +com.arangodb.serde.jsonb.JsonbSerdeProvider diff --git a/pom.xml b/pom.xml index 089e226b9..6a5f4d191 100644 --- a/pom.xml +++ b/pom.xml @@ -1,278 +1,375 @@ - - 4.0.0 - - com.arangodb - arangodb-java-driver - 4.3.4-SNAPSHOT - 2016 - jar - - arangodb-java-driver - ArangoDB Java Driver - http://maven.apache.org - - - - Apache License 2.0 - http://www.apache.org/licenses/LICENSE-2.0 - repo - - - - - UTF-8 - 1.7.13 - 1.1.3 - 1.3 - 4.12 - 4.5.1 - 1.0.14 - - - - - mpv1989 - Mark Vollmary - https://github.com/mpv1989 - - - - - - ossrh - https://oss.sonatype.org/content/repositories/snapshots - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - - - - - arangodb-snapshots - https://oss.sonatype.org/content/groups/staging - - - - - - doclint-java8-disable - - [1.8,) - - - -Xdoclint:none - - - - - - - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.5 - true - - ossrh - https://oss.sonatype.org/ - 84aff6e87e214c - false - - - - - org.apache.maven.plugins - maven-assembly-plugin - 2.4.1 - - - assembly - package - - single - - - - - - ${project.artifactId}-${project.version}-standalone - - false - false - - jar-with-dependencies - - - - - - 
org.apache.maven.plugins - maven-compiler-plugin - 3.2 - - 1.6 - 1.6 - - - - - - org.apache.maven.plugins - maven-resources-plugin - 2.7 - - UTF-8 - - - - - org.apache.maven.plugins - maven-source-plugin - 2.4 - - - - jar - - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 2.9.1 - - - attach-javadocs - - jar - - - ${javadoc.opts} - - - - - - - maven-surefire-plugin - 2.19.1 - - -Dfile.encoding=UTF-8 - - **/*Test.java - **/*Example.java - - - - - - maven-deploy-plugin - 2.8.2 - - false - 10 - - - - - org.apache.maven.plugins - maven-gpg-plugin - 1.5 - - - sign-artifacts - verify - - sign - - - - - - - - - - - - org.apache.httpcomponents - httpclient - provided - - - com.arangodb - velocypack - - - org.slf4j - slf4j-api - - - ch.qos.logback - logback-classic - test - - - junit - junit - test - - - org.hamcrest - hamcrest-all - test - - - - - - - org.apache.httpcomponents - httpclient - ${httpclient.version} - - - com.arangodb - velocypack - ${arangodb.velocypack.version} - - - org.slf4j - slf4j-api - ${slf4j-api.version} - - - ch.qos.logback - logback-classic - ${logback-classic.version} - - - junit - junit - ${junit.version} - - - org.hamcrest - hamcrest-all - ${hamcrest-all.version} - - - - - - https://github.com/arangodb/arangodb-java-driver - scm:git:git://github.com/arangodb/arangodb-java-driver.git - scm:git:git://github.com/arangodb/arangodb-java-driver.git - - - - ArangoDB GmbH - https://www.arangodb.com - - - + + 4.0.0 + + com.arangodb + arangodb-java-driver-parent + 7.22.0 + 2016 + + release-parent + core + driver + shaded + jackson-serde-json + jackson-serde-vpack + jsonb-serde + http-protocol + vst-protocol + + pom + + arangodb-java-driver-parent + ArangoDB Java Driver Project + https://github.com/arangodb/arangodb-java-driver + + + + Apache License 2.0 + https://www.apache.org/licenses/LICENSE-2.0 + repo + + + + + 8 + 8 + 8 + UTF-8 + https://sonarcloud.io + arangodb-1 + target/spotbugsXml.xml + site/jacoco/jacoco.xml + 24.2.1 + + + + + 
Michele Rastelli + https://github.com/rashtao + + + mpv1989 + Mark Vollmary + https://github.com/mpv1989 + + + + + ArangoDB GmbH + https://www.arangodb.com + + + + + tests + + + maven.test.skip + !true + + + + test-parent + test-functional + test-non-functional + test-resilience + test-perf + + + + + + + + com.fasterxml.jackson + jackson-bom + 2.19.0 + import + pom + + + io.vertx + vertx-stack-depchain + 4.5.7 + pom + import + + + com.google.code.findbugs + jsr305 + 3.0.2 + + + org.slf4j + slf4j-api + 2.0.17 + + + jakarta.json + jakarta.json-api + 2.1.3 + + + com.arangodb + arangodb-java-driver + ${project.version} + + + com.arangodb + core + ${project.version} + + + com.arangodb + http-protocol + ${project.version} + + + com.arangodb + vst-protocol + ${project.version} + + + com.arangodb + jackson-serde-json + ${project.version} + + + com.arangodb + jackson-serde-vpack + ${project.version} + + + com.arangodb + jackson-dataformat-velocypack + 4.6.1 + + + com.arangodb + velocypack + 3.1.0 + + + com.arangodb + jsonb-serde + ${project.version} + + + com.arangodb + arangodb-java-driver-shaded + ${project.version} + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + org.apache.maven.plugins + maven-resources-plugin + + + org.apache.maven.plugins + maven-enforcer-plugin + 3.5.0 + + + enforce + + enforce + + + + + compile + 1.8 + + jakarta.json:jakarta.json-api + jakarta.json.bind:jakarta.json.bind-api + + + + + + + 3.6.3 + + + + + + + + org.codehaus.mojo + extra-enforcer-rules + 1.10.0 + + + + + org.codehaus.mojo + versions-maven-plugin + 2.18.0 + + + + + + + regex + (?i).*(alpha|beta|m|rc).*(\d+)? 
+ + + + + io.vertx + + + regex + 5..* + + + + + io.netty + + + regex + .* + + + + + + + + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.14.0 + + + -Xlint:unchecked + -Xlint:deprecation + + true + + + + org.apache.maven.plugins + maven-resources-plugin + 3.3.1 + + UTF-8 + + + + org.apache.maven.plugins + maven-clean-plugin + 3.5.0 + + + org.apache.maven.plugins + maven-install-plugin + 3.1.4 + + + org.apache.maven.plugins + maven-site-plugin + 3.21.0 + + + org.apache.maven.plugins + maven-surefire-plugin + 3.5.3 + + true + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + org.jacoco + jacoco-maven-plugin + 0.8.13 + + + org.apache.maven.plugins + maven-jar-plugin + 3.4.2 + + + org.sonarsource.scanner.maven + sonar-maven-plugin + 5.1.0.4751 + + + org.apache.maven.plugins + maven-shade-plugin + 3.6.0 + + + com.google.code.maven-replacer-plugin + replacer + 1.5.3 + + + org.apache.maven.plugins + maven-surefire-report-plugin + 3.5.3 + + + org.codehaus.mojo + flatten-maven-plugin + 1.7.0 + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.11.2 + + + + + + + + oss.sonatype.org-snapshot + https://oss.sonatype.org/content/repositories/snapshots + + false + + + true + + + + + + https://github.com/arangodb/arangodb-java-driver + scm:git:git://github.com/arangodb/arangodb-java-driver.git + scm:git:git://github.com/arangodb/arangodb-java-driver.git + + + diff --git a/release-parent/pom.xml b/release-parent/pom.xml new file mode 100644 index 000000000..ceed2a11b --- /dev/null +++ b/release-parent/pom.xml @@ -0,0 +1,219 @@ + + + 4.0.0 + + com.arangodb + arangodb-java-driver-parent + 7.22.0 + + pom + + release-parent + release-parent + Parent for releasable modules + https://github.com/arangodb/arangodb-java-driver + + + + Apache License 2.0 + https://www.apache.org/licenses/LICENSE-2.0 + repo + + + + + + Michele Rastelli + https://github.com/rashtao + + + mpv1989 + Mark 
Vollmary + https://github.com/mpv1989 + + + + + https://github.com/arangodb/arangodb-java-driver + scm:git:git://github.com/arangodb/arangodb-java-driver.git + scm:git:git://github.com/arangodb/arangodb-java-driver.git + + + + + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.3.1 + + + + jar + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + + jar + + + + com.arangodb.internal, + com.arangodb.internal.*, + com.arangodb.serde.jackson.internal, + javax.* + + none + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 3.2.7 + + + --pinentry-mode + loopback + + + + + sign-artifacts + verify + + sign + + + + + + org.codehaus.mojo + flatten-maven-plugin + + oss + + + + + flatten + package + + flatten + + + + + flatten.clean + clean + + clean + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + ${project.name} + ${project.version} + ${moduleName} + + + true + + + + org.apache.maven.plugins + maven-clean-plugin + + + + ${project.basedir} + + **/dependency-reduced-pom.xml + + + + + + + org.sonatype.central + central-publishing-maven-plugin + 0.8.0 + true + + central + true + published + + + + + + + + static-code-analysis + + + + com.github.spotbugs + spotbugs-maven-plugin + 4.9.3.0 + + spotbugs/spotbugs-exclude.xml + + + + compile + + check + + + + + + com.github.spotbugs + spotbugs + 4.7.3 + + + + + org.jacoco + jacoco-maven-plugin + + + + report + + + + + ../test-functional/target/jacoco.exec + + XML + + + + + + + + + \ No newline at end of file diff --git a/shaded/pom.xml b/shaded/pom.xml new file mode 100644 index 000000000..5662f355c --- /dev/null +++ b/shaded/pom.xml @@ -0,0 +1,224 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + arangodb-java-driver-shaded + arangodb-java-driver-shaded + ArangoDB Java Driver Shaded + + + com.arangodb.driver + src/main/java/graal/**/* + + + + + com.arangodb + core + compile + + + com.arangodb + http-protocol + compile + + + com.arangodb + vst-protocol + 
compile + + + com.arangodb + jackson-dataformat-velocypack + compile + + + org.slf4j + slf4j-api + compile + + + jakarta.json + jakarta.json-api + compile + + + + com.arangodb + arangodb-java-driver + provided + + + org.graalvm.sdk + graal-sdk + ${graalvm.version} + provided + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + shade-core-dependencies + package + + shade + + + true + true + true + true + true + + + org.slf4j:slf4j-api + jakarta.json:jakarta.json-api + + + + + com.fasterxml.jackson + com.arangodb.shaded.fasterxml.jackson + + + com.arangodb.jackson.dataformat.velocypack + com.arangodb.shaded.jackson.dataformat.velocypack + + + io.netty + com.arangodb.shaded.netty + + + io.vertx + com.arangodb.shaded.vertx + + + + + com.arangodb:core + + META-INF/** + + + + com.arangodb:vst-protocol + + META-INF/MANIFEST.MF + META-INF/services/** + META-INF/maven/** + + + + com.arangodb:http-protocol + + META-INF/MANIFEST.MF + META-INF/services/** + META-INF/maven/** + + + + com.arangodb:velocypack + + META-INF/** + + + + com.arangodb:jackson-dataformat-velocypack + + META-INF/** + + + + com.fasterxml.jackson.core:* + + META-INF/** + module-info.class + + + + io.netty:* + + META-INF/** + + + + io.vertx:* + + *.adoc + examples/** + META-INF/** + + + + com.fasterxml.jackson.datatype:jackson-datatype-jakarta-jsonp + + META-INF/MANIFEST.MF + + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + + + + javadoc + + + maven.javadoc.skip + !true + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + attach-javadocs + package + + attach-artifact + + + + + + ${project.parent.parent.basedir}/driver/target/arangodb-java-driver-${project.version}-javadoc.jar + + jar + javadoc + + + + + + + + + + + + \ No newline at end of file diff --git a/shaded/src/main/java/graal/BrotliSubstitutions.java b/shaded/src/main/java/graal/BrotliSubstitutions.java new file mode 100644 index 000000000..6f067fc6d --- /dev/null +++ 
b/shaded/src/main/java/graal/BrotliSubstitutions.java @@ -0,0 +1,22 @@ +package graal; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + + +@TargetClass(className = "io.netty.handler.codec.compression.Brotli") +final class Target_io_netty_handler_codec_compression_Brotli { + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static void ensureAvailability() throws Throwable { + throw new UnsupportedOperationException(); + } +} + +public class BrotliSubstitutions { + +} diff --git a/shaded/src/main/java/graal/netty/EmptyByteBufStub.java b/shaded/src/main/java/graal/netty/EmptyByteBufStub.java new file mode 100644 index 000000000..1dc6dabf7 --- /dev/null +++ b/shaded/src/main/java/graal/netty/EmptyByteBufStub.java @@ -0,0 +1,33 @@ +package graal.netty; + +import io.netty.util.internal.PlatformDependent; + +import java.nio.ByteBuffer; + +public final class EmptyByteBufStub { + private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0); + private static final long EMPTY_BYTE_BUFFER_ADDRESS; + + static { + long emptyByteBufferAddress = 0; + try { + if (PlatformDependent.hasUnsafe()) { + emptyByteBufferAddress = PlatformDependent.directBufferAddress(EMPTY_BYTE_BUFFER); + } + } catch (Throwable t) { + // Ignore + } + EMPTY_BYTE_BUFFER_ADDRESS = emptyByteBufferAddress; + } + + public static ByteBuffer emptyByteBuffer() { + return EMPTY_BYTE_BUFFER; + } + + public static long emptyByteBufferAddress() { + return EMPTY_BYTE_BUFFER_ADDRESS; + } + + private EmptyByteBufStub() { + } +} diff --git a/shaded/src/main/java/graal/netty/graal/HttpContentCompressorSubstitutions.java b/shaded/src/main/java/graal/netty/graal/HttpContentCompressorSubstitutions.java new file mode 100644 index 000000000..92251b77b --- /dev/null +++ b/shaded/src/main/java/graal/netty/graal/HttpContentCompressorSubstitutions.java @@ -0,0 +1,69 @@ +package graal.netty.graal; + +import 
java.util.function.BooleanSupplier; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; + +public class HttpContentCompressorSubstitutions { + + @TargetClass(className = "io.netty.handler.codec.compression.ZstdEncoder", onlyWith = IsZstdAbsent.class) + public static final class ZstdEncoderFactorySubstitution { + + @Substitute + protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) throws Exception { + throw new UnsupportedOperationException(); + } + + @Substitute + protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) { + throw new UnsupportedOperationException(); + } + + @Substitute + public void flush(final ChannelHandlerContext ctx) { + throw new UnsupportedOperationException(); + } + } + + @Substitute + @TargetClass(className = "io.netty.handler.codec.compression.ZstdConstants", onlyWith = IsZstdAbsent.class) + public static final class ZstdConstants { + + // The constants make calls to com.github.luben.zstd.Zstd so we cut links with that substitution. 
+ + static final int DEFAULT_COMPRESSION_LEVEL = 0; + + static final int MIN_COMPRESSION_LEVEL = 0; + + static final int MAX_COMPRESSION_LEVEL = 0; + + static final int MAX_BLOCK_SIZE = 0; + + static final int DEFAULT_BLOCK_SIZE = 0; + } + + public static class IsZstdAbsent implements BooleanSupplier { + + private boolean zstdAbsent; + + public IsZstdAbsent() { + try { + Class.forName("com.github.luben.zstd.Zstd"); + zstdAbsent = false; + } catch (Exception e) { + // It can be a classloading issue (the library is not available), or a native issue + // (the library for the current OS/arch is not available) + zstdAbsent = true; + } + } + + @Override + public boolean getAsBoolean() { + return zstdAbsent; + } + } +} diff --git a/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java b/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java new file mode 100644 index 000000000..4eab2181b --- /dev/null +++ b/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java @@ -0,0 +1,604 @@ +package graal.netty.graal; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.RecomputeFieldValue; +import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import graal.netty.EmptyByteBufStub; +import io.netty.bootstrap.AbstractBootstrapConfig; +import io.netty.bootstrap.ChannelFactory; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.*; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.ZlibCodecFactory; +import io.netty.handler.codec.compression.ZlibWrapper; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http2.Http2Exception; +import io.netty.handler.ssl.*; +import io.netty.handler.ssl.ApplicationProtocolConfig.SelectorFailureBehavior; +import io.netty.util.concurrent.GlobalEventExecutor; +import 
io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; + +import javax.crypto.NoSuchPaddingException; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.*; +import java.security.cert.X509Certificate; +import java.security.spec.InvalidKeySpecException; +import java.util.*; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.function.BooleanSupplier; + +import static io.netty.handler.codec.http.HttpHeaderValues.*; + +/** + * This substitution avoid having loggers added to the build + */ +@TargetClass(className = "io.netty.util.internal.logging.InternalLoggerFactory") +final class Target_io_netty_util_internal_logging_InternalLoggerFactory { + + @Substitute + private static InternalLoggerFactory newDefaultFactory(String name) { + return JdkLoggerFactory.INSTANCE; + } +} + +// SSL +// This whole section is mostly about removing static analysis references to openssl/tcnative + +@TargetClass(className = "io.netty.handler.ssl.SslProvider") +final class Target_io_netty_handler_ssl_SslProvider { + @Substitute + public static boolean isAlpnSupported(final SslProvider provider) { + switch (provider) { + case JDK: + return Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator.isAlpnSupported(); + case OPENSSL: + case OPENSSL_REFCNT: + return false; + default: + throw new Error("SslProvider unsupported on Quarkus " + provider); + } + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator") +final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator { + @Alias + static boolean isAlpnSupported() { + return true; + } +} + +/** + * Hardcode io.netty.handler.ssl.OpenSsl as non-available + */ +@TargetClass(className = 
"io.netty.handler.ssl.OpenSsl") +final class Target_io_netty_handler_ssl_OpenSsl { + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Throwable UNAVAILABILITY_CAUSE = new RuntimeException("OpenSsl unsupported on Quarkus"); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static List DEFAULT_CIPHERS = Collections.emptyList(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static Set AVAILABLE_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Set AVAILABLE_OPENSSL_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Set AVAILABLE_JAVA_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static boolean SUPPORTS_KEYMANAGER_FACTORY = false; + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static boolean SUPPORTS_OCSP = false; + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static Set SUPPORTED_PROTOCOLS_SET = Collections.emptySet(); + + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static int version() { + return -1; + } + + @Substitute + public static String versionString() { + return null; + } + + @Substitute + public static boolean isCipherSuiteAvailable(String cipherSuite) { + return false; + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslServerContext") +final class Target_io_netty_handler_ssl_JdkSslServerContext { + + @Alias + Target_io_netty_handler_ssl_JdkSslServerContext(Provider provider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, 
boolean startTls, + String keyStore) + throws SSLException { + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslClientContext") +final class Target_io_netty_handler_ssl_JdkSslClientContext { + + @Alias + Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, X509Certificate[] trustCertCollection, + TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, + String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, + CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, + long sessionCacheSize, long sessionTimeout, String keyStoreType) + throws SSLException { + + } +} +@TargetClass(className = "io.netty.handler.ssl.SslHandler$SslEngineType") +final class Target_io_netty_handler_ssl_SslHandler$SslEngineType { + + @Alias + public static Target_io_netty_handler_ssl_SslHandler$SslEngineType JDK; + + @Substitute + static Target_io_netty_handler_ssl_SslHandler$SslEngineType forEngine(SSLEngine engine) { + return JDK; + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator$AlpnWrapper") +final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator_AlpnWrapper { + @Substitute + public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, + JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { + return (SSLEngine) (Object) new Target_io_netty_handler_ssl_JdkAlpnSslEngine(engine, applicationNegotiator, + isServer); + } + +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnSslEngine") +final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { + @Alias + Target_io_netty_handler_ssl_JdkAlpnSslEngine(final SSLEngine engine, + final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + + } +} + +@TargetClass(className = "io.netty.handler.ssl.SslContext") +final class Target_io_netty_handler_ssl_SslContext { + + @Substitute + static SslContext 
newServerContextInternal(SslProvider provider, Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, + PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, + CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, boolean startTls, boolean enableOcsp, String keyStoreType, + Map.Entry, Object>... ctxOptions) throws SSLException { + if (enableOcsp) { + throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); + } + return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslServerContext(sslContextProvider, + trustCertCollection, trustManagerFactory, keyCertChain, key, keyPassword, + keyManagerFactory, ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout, + clientAuth, protocols, startTls, keyStoreType); + } + + @Substitute + static SslContext newClientContextInternal(SslProvider provider, Provider sslContextProvider, + X509Certificate[] trustCert, + TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long sessionTimeout, + boolean enableOcsp, + String keyStoreType, Map.Entry, Object>... 
options) throws SSLException { + if (enableOcsp) { + throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); + } + return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslClientContext(sslContextProvider, + trustCert, trustManagerFactory, keyCertChain, key, keyPassword, + keyManagerFactory, ciphers, cipherFilter, apn, protocols, sessionCacheSize, + sessionTimeout, keyStoreType); + } + +} +@TargetClass(className = "io.netty.handler.ssl.JdkDefaultApplicationProtocolNegotiator") +final class Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator { + + @Alias + public static Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator INSTANCE; +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslContext") +final class Target_io_netty_handler_ssl_JdkSslContext { + + @Substitute + static JdkApplicationProtocolNegotiator toNegotiator(ApplicationProtocolConfig config, boolean isServer) { + if (config == null) { + return (JdkApplicationProtocolNegotiator) (Object) Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator.INSTANCE; + } + + switch (config.protocol()) { + case NONE: + return (JdkApplicationProtocolNegotiator) (Object) Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator.INSTANCE; + case ALPN: + if (isServer) { + // GRAAL RC9 bug: https://github.com/oracle/graal/issues/813 + // switch(config.selectorFailureBehavior()) { + // case FATAL_ALERT: + // return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + // case NO_ADVERTISE: + // return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + // default: + // throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + // .append(config.selectorFailureBehavior()).append(" failure behavior").toString()); + // } + SelectorFailureBehavior behavior = config.selectorFailureBehavior(); + if (behavior == 
SelectorFailureBehavior.FATAL_ALERT) { + return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + } else if (behavior == SelectorFailureBehavior.NO_ADVERTISE) { + return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + } else { + throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + .append(config.selectorFailureBehavior()).append(" failure behavior").toString()); + } + } else { + switch (config.selectedListenerFailureBehavior()) { + case ACCEPT: + return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + case FATAL_ALERT: + return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + default: + throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + .append(config.selectedListenerFailureBehavior()).append(" failure behavior") + .toString()); + } + } + default: + throw new UnsupportedOperationException( + new StringBuilder("JDK provider does not support ").append(config.protocol()) + .append(" protocol") + .toString()); + } + } + +} + +/* + * This one only prints exceptions otherwise we get a useless bogus + * exception message: https://github.com/eclipse-vertx/vert.x/issues/1657 + */ +@TargetClass(className = "io.netty.bootstrap.AbstractBootstrap") +final class Target_io_netty_bootstrap_AbstractBootstrap { + + @Alias + private ChannelFactory channelFactory; + + @Alias + void init(Channel channel) throws Exception { + } + + @Alias + public AbstractBootstrapConfig config() { + return null; + } + + @Substitute + final ChannelFuture initAndRegister() { + Channel channel = null; + try { + channel = channelFactory.newChannel(); + init(channel); + } catch (Throwable t) { + // THE FIX IS HERE: + t.printStackTrace(); + if (channel != null) { + // channel can be null if newChannel crashed (eg SocketException("too many open files")) + channel.unsafe().closeForcibly(); + } + // as the 
Channel is not registered yet, we need to force the usage of the GlobalEventExecutor + return new DefaultChannelPromise(channel, GlobalEventExecutor.INSTANCE).setFailure(t); + } + + ChannelFuture regFuture = config().group().register(channel); + if (regFuture.cause() != null) { + if (channel.isRegistered()) { + channel.close(); + } else { + channel.unsafe().closeForcibly(); + } + } + + // If we are here and the promise is not failed, it's one of the following cases: + // 1) If we attempted registration from the event loop, the registration has been completed at this point. + // i.e. It's safe to attempt bind() or connect() now because the channel has been registered. + // 2) If we attempted registration from the other thread, the registration request has been successfully + // added to the event loop's task queue for later execution. + // i.e. It's safe to attempt bind() or connect() now: + // because bind() or connect() will be executed *after* the scheduled registration task is executed + // because register(), bind(), and connect() are all bound to the same thread. 
+ + return regFuture; + + } +} + +@TargetClass(className = "io.netty.channel.nio.NioEventLoop") +final class Target_io_netty_channel_nio_NioEventLoop { + + @Substitute + private static Queue newTaskQueue0(int maxPendingTasks) { + return new LinkedBlockingDeque<>(); + } +} + +@TargetClass(className = "io.netty.buffer.AbstractReferenceCountedByteBuf") +final class Target_io_netty_buffer_AbstractReferenceCountedByteBuf { + + @Alias + @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") + private static long REFCNT_FIELD_OFFSET; +} + +@TargetClass(className = "io.netty.util.AbstractReferenceCounted") +final class Target_io_netty_util_AbstractReferenceCounted { + + @Alias + @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") + private static long REFCNT_FIELD_OFFSET; +} + +// This class is runtime-initialized by NettyProcessor +final class Holder_io_netty_util_concurrent_ScheduledFutureTask { + static final long START_TIME = System.nanoTime(); +} + +@TargetClass(className = "io.netty.util.concurrent.AbstractScheduledEventExecutor") +final class Target_io_netty_util_concurrent_AbstractScheduledEventExecutor { + + // The START_TIME field is kept but not used. + // All the accesses to it have been replaced with Holder_io_netty_util_concurrent_ScheduledFutureTask + + @Substitute + static long initialNanoTime() { + return Holder_io_netty_util_concurrent_ScheduledFutureTask.START_TIME; + } + + @Substitute + static long defaultCurrentTimeNanos() { + return System.nanoTime() - Holder_io_netty_util_concurrent_ScheduledFutureTask.START_TIME; + } +} + +@TargetClass(className = "io.netty.channel.ChannelHandlerMask") +final class Target_io_netty_channel_ChannelHandlerMask { + + // Netty tries to self-optimized itself, but it requires lots of reflection. We disable this behavior and avoid + // misleading DEBUG messages in the log. + @Substitute + private static boolean isSkippable(final Class handlerType, final String methodName, final Class... 
paramTypes) { + return false; + } +} + +@TargetClass(className = "io.netty.util.internal.NativeLibraryLoader") +final class Target_io_netty_util_internal_NativeLibraryLoader { + + // This method can trick GraalVM into thinking that Classloader#defineClass is getting called + @Substitute + static Class tryToLoadClass(final ClassLoader loader, final Class helper) + throws ClassNotFoundException { + return Class.forName(helper.getName(), false, loader); + } + +} + +@TargetClass(className = "io.netty.buffer.EmptyByteBuf") +final class Target_io_netty_buffer_EmptyByteBuf { + + @Alias + @RecomputeFieldValue(kind = Kind.Reset) + private static ByteBuffer EMPTY_BYTE_BUFFER; + + @Alias + @RecomputeFieldValue(kind = Kind.Reset) + private static long EMPTY_BYTE_BUFFER_ADDRESS; + + @Substitute + public ByteBuffer nioBuffer() { + return EmptyByteBufStub.emptyByteBuffer(); + } + + @Substitute + public ByteBuffer[] nioBuffers() { + return new ByteBuffer[] { EmptyByteBufStub.emptyByteBuffer() }; + } + + @Substitute + public ByteBuffer internalNioBuffer(int index, int length) { + return EmptyByteBufStub.emptyByteBuffer(); + } + + @Substitute + public boolean hasMemoryAddress() { + return EmptyByteBufStub.emptyByteBufferAddress() != 0; + } + + @Substitute + public long memoryAddress() { + if (hasMemoryAddress()) { + return EmptyByteBufStub.emptyByteBufferAddress(); + } else { + throw new UnsupportedOperationException(); + } + } + +} + +@TargetClass(className = "io.netty.handler.codec.http.HttpContentDecompressor") +final class Target_io_netty_handler_codec_http_HttpContentDecompressor { + + @Alias + private boolean strict; + + @Alias + protected ChannelHandlerContext ctx; + + @Substitute + protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Exception { + if (GZIP.contentEqualsIgnoreCase(contentEncoding) || + X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + 
ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP)); + } + if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || + X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + final ZlibWrapper wrapper = strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + // To be strict, 'deflate' means ZLIB, but some servers were not implemented correctly. + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); + } + + // 'identity' or unsupported + return null; + } +} + +@TargetClass(className = "io.netty.handler.codec.http2.DelegatingDecompressorFrameListener") +final class Target_io_netty_handler_codec_http2_DelegatingDecompressorFrameListener { + + @Alias + boolean strict; + + @Substitute + protected EmbeddedChannel newContentDecompressor(ChannelHandlerContext ctx, CharSequence contentEncoding) + throws Http2Exception { + if (!HttpHeaderValues.GZIP.contentEqualsIgnoreCase(contentEncoding) + && !HttpHeaderValues.X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + if (!HttpHeaderValues.DEFLATE.contentEqualsIgnoreCase(contentEncoding) + && !HttpHeaderValues.X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + return null; + } else { + ZlibWrapper wrapper = this.strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), + new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(wrapper) }); + } + } else { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), + new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP) }); + } + } +} + +@TargetClass(className = "io.netty.handler.ssl.SslHandler") +final class Target_SslHandler { + + @Substitute + private void setOpensslEngineSocketFd(Channel c) { + // do nothing. 
+ } +} + +@TargetClass(className = "io.netty.handler.ssl.PemReader") +final class Alias_PemReader { + + @Alias + public static ByteBuf readPrivateKey(File keyFile) { + return null; + } + + @Alias + public static ByteBuf readPrivateKey(InputStream in) throws KeyException { + return null; + } +} + +/** + * If BouncyCastle is not on the classpath, we must not try to read the PEM file using the BouncyCatle PEM reader. + */ +@TargetClass(className = "io.netty.handler.ssl.SslContext", onlyWith = IsBouncyNotThere.class) +final class Target_SslContext { + + @Substitute + protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, + NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, + KeyException, IOException { + if (keyFile == null) { + return null; + } + + return getPrivateKeyFromByteBuffer(Alias_PemReader.readPrivateKey(keyFile), keyPassword); + } + + @Substitute + protected static PrivateKey toPrivateKey(InputStream keyInputStream, String keyPassword) + throws NoSuchAlgorithmException, + NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, + KeyException, IOException { + if (keyInputStream == null) { + return null; + } + + return getPrivateKeyFromByteBuffer(Alias_PemReader.readPrivateKey(keyInputStream), keyPassword); + } + + @Alias + private static PrivateKey getPrivateKeyFromByteBuffer(ByteBuf encodedKeyBuf, String keyPassword) + throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, KeyException, IOException { + return null; + } +} + +class IsBouncyNotThere implements BooleanSupplier { + + @Override + public boolean getAsBoolean() { + try { + NettySubstitutions.class.getClassLoader().loadClass("org.bouncycastle.openssl.PEMParser"); + return false; + } catch (Exception e) { + return true; + } + } +} + +class NettySubstitutions { + +} diff --git 
a/shaded/src/main/java/graal/netty/graal/ZLibSubstitutions.java b/shaded/src/main/java/graal/netty/graal/ZLibSubstitutions.java new file mode 100644 index 000000000..7017aaa86 --- /dev/null +++ b/shaded/src/main/java/graal/netty/graal/ZLibSubstitutions.java @@ -0,0 +1,66 @@ +package graal.netty.graal; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import io.netty.handler.codec.compression.*; + +/** + * This substitution avoid having jcraft zlib added to the build + */ +@TargetClass(className = "io.netty.handler.codec.compression.ZlibCodecFactory") +final class Target_io_netty_handler_codec_compression_ZlibCodecFactory { + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel) { + return new JdkZlibEncoder(compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper) { + return new JdkZlibEncoder(wrapper); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel) { + return new JdkZlibEncoder(wrapper, compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel, int windowBits, int memLevel) { + return new JdkZlibEncoder(wrapper, compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(byte[] dictionary) { + return new JdkZlibEncoder(dictionary); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel, byte[] dictionary) { + return new JdkZlibEncoder(compressionLevel, dictionary); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel, int windowBits, int memLevel, byte[] dictionary) { + return new JdkZlibEncoder(compressionLevel, dictionary); + } + + @Substitute + public static ZlibDecoder newZlibDecoder() { + return new JdkZlibDecoder(); + } + + @Substitute + public static ZlibDecoder newZlibDecoder(ZlibWrapper wrapper) { + return new 
JdkZlibDecoder(wrapper); + } + + @Substitute + public static ZlibDecoder newZlibDecoder(byte[] dictionary) { + return new JdkZlibDecoder(dictionary); + } +} + +class ZLibSubstitutions { + +} diff --git a/shaded/src/main/java/graal/netty/package-info.java b/shaded/src/main/java/graal/netty/package-info.java new file mode 100644 index 000000000..8b55354f7 --- /dev/null +++ b/shaded/src/main/java/graal/netty/package-info.java @@ -0,0 +1,4 @@ +/** + * from io.quarkus:quarkus-netty:3.10.1 + */ +package graal.netty; diff --git a/shaded/src/main/java/graal/vertx/graal/VertxSubstitutions.java b/shaded/src/main/java/graal/vertx/graal/VertxSubstitutions.java new file mode 100644 index 000000000..d8ca211b6 --- /dev/null +++ b/shaded/src/main/java/graal/vertx/graal/VertxSubstitutions.java @@ -0,0 +1,197 @@ +package graal.vertx.graal; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import io.netty.handler.ssl.*; +import io.vertx.core.MultiMap; +import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.dns.AddressResolverOptions; +import io.vertx.core.eventbus.EventBusOptions; +import io.vertx.core.eventbus.impl.HandlerHolder; +import io.vertx.core.eventbus.impl.HandlerRegistration; +import io.vertx.core.eventbus.impl.MessageImpl; +import io.vertx.core.eventbus.impl.OutboundDeliveryContext; +import io.vertx.core.impl.ContextInternal; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.resolver.DefaultResolverProvider; +import io.vertx.core.impl.transports.JDKTransport; +import io.vertx.core.net.NetServerOptions; +import io.vertx.core.spi.resolver.ResolverProvider; +import io.vertx.core.spi.transport.Transport; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import 
java.util.concurrent.ConcurrentMap; + +@TargetClass(className = "io.vertx.core.impl.VertxBuilder") +final class Target_io_vertx_core_impl_VertxBuilder { + @Substitute + public static Transport nativeTransport() { + return JDKTransport.INSTANCE; + } +} + +/** + * This substitution forces the usage of the blocking DNS resolver + */ +@TargetClass(className = "io.vertx.core.spi.resolver.ResolverProvider") +final class TargetResolverProvider { + + @Substitute + public static ResolverProvider factory(Vertx vertx, AddressResolverOptions options) { + return new DefaultResolverProvider(); + } +} + +@TargetClass(className = "io.vertx.core.net.OpenSSLEngineOptions") +final class Target_io_vertx_core_net_OpenSSLEngineOptions { + + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static boolean isAlpnAvailable() { + return false; + } +} + +@SuppressWarnings("rawtypes") +@TargetClass(className = "io.vertx.core.eventbus.impl.clustered.ClusteredEventBus") +final class Target_io_vertx_core_eventbus_impl_clustered_ClusteredEventBusClusteredEventBus { + + @Substitute + private NetServerOptions getServerOptions() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public void start(Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public void close(Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public MessageImpl createMessage(boolean send, boolean isLocal, String address, MultiMap headers, Object body, + String codecName) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected void onLocalRegistration(HandlerHolder handlerHolder, Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected HandlerHolder createHandlerHolder(HandlerRegistration registration, boolean replyHandler, + boolean localOnly, ContextInternal context) { + throw new RuntimeException("Not 
Implemented"); + } + + @Substitute + protected void onLocalUnregistration(HandlerHolder handlerHolder, Promise completionHandler) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected void sendOrPub(OutboundDeliveryContext sendContext) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected String generateReplyAddress() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected boolean isMessageLocal(MessageImpl msg) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + ConcurrentMap connections() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + VertxInternal vertx() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + EventBusOptions options() { + throw new RuntimeException("Not Implemented"); + } +} + +@TargetClass(className = "io.vertx.core.spi.tls.DefaultSslContextFactory") +final class Target_DefaultSslContextFactory { + + @Alias + private Set enabledCipherSuites; + + @Alias + private List applicationProtocols; + + @Alias + private ClientAuth clientAuth; + + @Substitute + private SslContext createContext(boolean useAlpn, boolean client, KeyManagerFactory kmf, TrustManagerFactory tmf) + throws SSLException { + SslContextBuilder builder; + if (client) { + builder = SslContextBuilder.forClient(); + if (kmf != null) { + builder.keyManager(kmf); + } + } else { + builder = SslContextBuilder.forServer(kmf); + } + Collection cipherSuites = enabledCipherSuites; + builder.sslProvider(SslProvider.JDK); + if (cipherSuites == null || cipherSuites.isEmpty()) { + cipherSuites = Target_io_vertx_core_spi_tls_DefaultJDKCipherSuite.get(); + } + if (tmf != null) { + builder.trustManager(tmf); + } + if (cipherSuites != null && cipherSuites.size() > 0) { + builder.ciphers(cipherSuites); + } + if (useAlpn && applicationProtocols != null && applicationProtocols.size() > 0) { + builder.applicationProtocolConfig(new ApplicationProtocolConfig( + 
ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + applicationProtocols)); + } + if (clientAuth != null) { + builder.clientAuth(clientAuth); + } + return builder.build(); + } +} + +@TargetClass(className = "io.vertx.core.spi.tls.DefaultJDKCipherSuite") +final class Target_io_vertx_core_spi_tls_DefaultJDKCipherSuite { + @Alias + static List get() { + return null; + } +} + +class VertxSubstitutions { + +} diff --git a/shaded/src/main/java/graal/vertx/graal/package-info.java b/shaded/src/main/java/graal/vertx/graal/package-info.java new file mode 100644 index 000000000..f6cb91e99 --- /dev/null +++ b/shaded/src/main/java/graal/vertx/graal/package-info.java @@ -0,0 +1,4 @@ +/** + * from io.quarkus:quarkus-vertx:3.10.1 + */ +package graal.vertx.graal; diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/native-image.properties b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/native-image.properties new file mode 100644 index 000000000..d3c564e31 --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/native-image.properties @@ -0,0 +1,44 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config.json,${.}/resource-config-spi.json \ +-H:ReflectionConfigurationResources=${.}/reflect-config.json,${.}/reflect-config-serde.json,${.}/reflect-config-spi.json,${.}/reflect-config-mp-config.json \ +-H:SerializationConfigurationResources=${.}/serialization-config.json \ +-H:DynamicProxyConfigurationResources=${.}/proxy-config.json \ +-Dcom.arangodb.shaded.netty.noUnsafe=true \ +-Dcom.arangodb.shaded.netty.leakDetection.level=DISABLED \ +--initialize-at-build-time=\ + com.arangodb.shaded.netty \ + --initialize-at-run-time=\ + com.arangodb.shaded.netty.buffer.PooledByteBufAllocator,\ + 
com.arangodb.shaded.netty.buffer.ByteBufAllocator,\ + com.arangodb.shaded.netty.buffer.ByteBufUtil,\ + com.arangodb.shaded.netty.buffer.AbstractReferenceCountedByteBuf,\ + com.arangodb.shaded.netty.handler.ssl.JdkSslServerContext,\ + com.arangodb.shaded.netty.handler.codec.compression.BrotliDecoder,\ + com.arangodb.shaded.netty.handler.codec.compression.ZstdConstants,\ + com.arangodb.shaded.netty.handler.codec.http2.Http2CodecUtil,\ + com.arangodb.shaded.netty.handler.codec.http2.Http2ClientUpgradeCodec,\ + com.arangodb.shaded.netty.handler.codec.http2.Http2ConnectionHandler,\ + com.arangodb.shaded.netty.handler.codec.http2.DefaultHttp2FrameWriter,\ + com.arangodb.shaded.netty.handler.codec.http.HttpObjectEncoder,\ + com.arangodb.shaded.netty.handler.codec.http.websocketx.WebSocket00FrameEncoder,\ + com.arangodb.shaded.netty.handler.codec.http.websocketx.extensions.compression.DeflateDecoder,\ + com.arangodb.shaded.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler,\ + com.arangodb.shaded.netty.handler.codec.http2.Http2ServerUpgradeCodec,\ + com.arangodb.shaded.netty.handler.pcap.PcapWriteHandler$WildcardAddressHolder,\ + com.arangodb.shaded.netty.util.AbstractReferenceCounted,\ + com.arangodb.shaded.netty.util.concurrent.GlobalEventExecutor,\ + com.arangodb.shaded.netty.util.concurrent.ImmediateEventExecutor,\ + com.arangodb.shaded.netty.util.concurrent.ScheduledFutureTask,\ + com.arangodb.shaded.netty.util.internal.ThreadLocalRandom,\ + com.arangodb.shaded.netty.util.NetUtilSubstitutions$NetUtilLocalhost4LazyHolder,\ + com.arangodb.shaded.netty.util.NetUtilSubstitutions$NetUtilLocalhost6LazyHolder,\ + com.arangodb.shaded.netty.util.NetUtilSubstitutions$NetUtilLocalhostLazyHolder,\ + com.arangodb.shaded.netty.util.NetUtilSubstitutions$NetUtilNetworkInterfacesLazyHolder,\ + com.arangodb.shaded.netty.handler.ssl.util.ThreadLocalInsecureRandom,\ + com.arangodb.shaded.netty.resolver.dns.DefaultDnsServerAddressStreamProvider,\ + 
com.arangodb.shaded.netty.resolver.dns.DnsServerAddressStreamProviders$DefaultProviderHolder,\ + com.arangodb.shaded.netty.resolver.dns.DnsNameResolver,\ + com.arangodb.shaded.netty.resolver.HostsFileEntriesResolver,\ + com.arangodb.shaded.netty.resolver.dns.ResolvConf$ResolvConfLazy,\ + com.arangodb.shaded.netty.resolver.dns.DefaultDnsServerAddressStreamProvider,\ + com.arangodb.shaded.vertx.core.buffer.impl.VertxByteBufAllocator diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/proxy-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/proxy-config.json new file mode 100644 index 000000000..7453e1289 --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/proxy-config.json @@ -0,0 +1,26 @@ +[ + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$JsonFactory"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Builder"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Static"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Builder"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Static"] + }, + { + "interfaces":["com.arangodb.internal.serde.JacksonUtils$Version"] + } +] diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-mp-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-mp-config.json new file mode 100644 index 000000000..84892c37d --- /dev/null +++ 
b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-mp-config.json @@ -0,0 +1,13 @@ +[ + { + "name": "com.arangodb.config.HostDescription", + "methods": [ + { + "name": "parse", + "parameterTypes": [ + "java.lang.CharSequence" + ] + } + ] + } +] \ No newline at end of file diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-serde.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-serde.json new file mode 100644 index 000000000..77e4aa2d0 --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-serde.json @@ -0,0 +1,146 @@ +[ + { + "name": "com.arangodb.internal.serde.JacksonUtils$JsonFactory", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamReadConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Builder", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$StreamWriteConstraints$Static", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.internal.serde.JacksonUtils$Version", + "queryAllDeclaredMethods": true + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.JsonFactory", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "setStreamReadConstraints", + "parameterTypes": [ + "com.arangodb.shaded.fasterxml.jackson.core.StreamReadConstraints" + ] + }, 
+ { + "name": "setStreamWriteConstraints", + "parameterTypes": [ + "com.arangodb.shaded.fasterxml.jackson.core.StreamWriteConstraints" + ] + }, + { + "name": "version", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.StreamReadConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.StreamReadConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxDocumentLength", + "parameterTypes": [ + "long" + ] + }, + { + "name": "maxNameLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxNumberLength", + "parameterTypes": [ + "int" + ] + }, + { + "name": "maxStringLength", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.StreamWriteConstraints", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "builder", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.StreamWriteConstraints$Builder", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "build", + "parameterTypes": [] + }, + { + "name": "maxNestingDepth", + "parameterTypes": [ + "int" + ] + } + ] + }, + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.Version", + "queryAllPublicMethods": true, + "methods": [ + { + "name": "getMajorVersion", + "parameterTypes": [] + }, + { + "name": "getMinorVersion", + "parameterTypes": [] + } + ] + } +] diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-spi.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-spi.json new file mode 100644 index 000000000..e14d4bf2e --- /dev/null +++ 
b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config-spi.json @@ -0,0 +1,20 @@ +[ + { + "name": "com.arangodb.shaded.fasterxml.jackson.core.JsonFactory", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.shaded.jackson.dataformat.velocypack.VPackFactory", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json new file mode 100644 index 000000000..94919ac94 --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json @@ -0,0 +1,1652 @@ +[ + { + "name": "com.arangodb.internal.serde.InternalSerializers$FieldLinksSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalSerializers$CollectionLinksSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalSerializers$CollectionSchemaRuleSerializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$CollectionLinksDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$CollectionSchemaRuleDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.internal.serde.InternalDeserializers$FieldLinksDeserializer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true 
+ }, + { + "name": "com.arangodb.entity.ViewEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.BaseDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SearchAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.BaseEdgeDocument", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerRole", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$SatelliteReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties$BreakMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerFeature", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine$StorageEngineName", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex$OperationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryExecutionState", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity$CacheMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity$LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.StoreValuesType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LoadBalancingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field$Direction", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.IndexType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchCompression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.License", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SearchAnalyzerCase", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties$ReturnType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StreamType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2Format", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ViewType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerType", + "allDeclaredFields": true, 
+ "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties$GeoJSONAnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ShardingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.Permissions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionRevisionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryOptimizerRule", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorEntity", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.ClassificationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.IndexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlParseEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeDefinition", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.MultiDocumentEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.GraphEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlFunctionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DatabaseEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.PrimarySort", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.EdgeNgram", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.TransactionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.UserEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.ErrorEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.FieldLink", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlParseEntity$AstNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoAnalyzerOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ShardEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryOptimizerRule$Flags", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NearestNeighborsAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MultiDelimiterAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.PipelineAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$NumericReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.TextAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeDefinition$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBVersion", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionPlan", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NormAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.AqlQueryExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionPlan", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NGramAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorWarning", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StemAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogEntriesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.CollectionLink", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.entity.arangosearch.StoredValue", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentImportEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.CollationAnalyzerProperties", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryTrackingPropertiesEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CursorEntity$Extras", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogEntriesEntity$Message", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexField", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionExpression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentCreateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.VertexUpdateEntity", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentDeleteEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.VertexEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.DocumentUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.EdgeEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MultiDelimiterAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.IdentityAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2Analyzer", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.PipelineAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NearestNeighborsAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.ClassificationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StemAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NormAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.CollationAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.TextAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.NGramAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerRole", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ReplicationFactor$SatelliteReplicationFactor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SegmentationAnalyzerProperties$BreakMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerFeature", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ArangoDBEngine$StorageEngineName", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.SearchAliasIndex$OperationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.StreamTransactionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryExecutionState", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.QueryCachePropertiesEntity$CacheMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LogLevelEntity$LogLevel", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.StoreValuesType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.LoadBalancingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.InvertedIndexPrimarySort$Field$Direction", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.IndexType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ArangoSearchCompression", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.License", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionStatus", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.SearchAnalyzerCase", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.AQLAnalyzerProperties$ReturnType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.StreamType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.CollectionType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.KeyType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2Format", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + 
}, + { + "name": "com.arangodb.entity.ViewType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ServerMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoJSONAnalyzerProperties$GeoJSONAnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.ShardingStrategy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.entity.Permissions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionalOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.IndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AbstractMDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions$Optimizer", + 
"allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions$OnDuplicate", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIFieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.OverwriteMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ImportType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions$SortOrder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue$ComputeOn", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema$Level", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ZKDIndexOptions$FieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionCountOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.model.EdgeDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphDocumentReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentExistsOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentReplaceOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionTruncateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, 
+ { + "name": "com.arangodb.model.DocumentCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionDropOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogLevelOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DBCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlFunctionDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.StreamTransactionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserUpdateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DatabaseUsersOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ViewRenameOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCollectionDropOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionCreateOptions", + "allDeclaredFields": true, + 
"allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ViewCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionCreateOptions$Options", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryParseOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserAccessOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.EdgeCollectionRemoveOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.ArangoSearchCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionsReadOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.OptionsBuilder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TransactionCollectionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionPropertiesOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.UserCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DatabaseOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.VertexCollectionRemoveOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", + "allDeclaredFields": true, 
+ "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionRenameOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.arangosearch.SearchAliasCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlFunctionGetOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlFunctionCreateOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.AqlQueryExplainOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ExplainAqlQueryOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.QueueTimeSample", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.GeoIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.TtlIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.FulltextIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": 
"com.arangodb.model.ZKDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.InvertedIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.PersistentIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDPrefixedIndexOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.DocumentImportOptions$OnDuplicate", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.MDIFieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.OverwriteMode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ImportType", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.LogOptions$SortOrder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ComputedValue$ComputeOn", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.CollectionSchema$Level", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.model.ZKDIndexOptions$FieldValueTypes", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true 
+ } +] \ No newline at end of file diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config-spi.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config-spi.json new file mode 100644 index 000000000..43406b15c --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.arangodb.shaded.fasterxml.jackson.core.JsonFactory" + } + ] + }, + "bundles": [] +} diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config.json new file mode 100644 index 000000000..3bb430bb1 --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/resource-config.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/vertx/vertx-version.txt" + } + ] + }, + "bundles": [] +} diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json new file mode 100644 index 000000000..e5d77727d --- /dev/null +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json @@ -0,0 +1,26 @@ +[ + { + "name": "com.arangodb.entity.ErrorEntity" + }, + { + "name": "com.arangodb.ArangoDBException" + }, + { + "name": "com.arangodb.ArangoDBMultipleException" + }, + { + "name": "com.arangodb.internal.net.ArangoDBRedirectException" + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument" + }, + { + "name": "com.arangodb.entity.BaseDocument" + }, + { + 
"name": "com.arangodb.entity.BaseEdgeDocument" + }, + { + "name": "java.util.HashMap" + } +] diff --git a/shaded/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider b/shaded/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider new file mode 100644 index 000000000..faf8cac63 --- /dev/null +++ b/shaded/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider @@ -0,0 +1,2 @@ +com.arangodb.http.HttpProtocolProvider +com.arangodb.vst.VstProtocolProvider diff --git a/shaded/src/main/resources/META-INF/services/com.arangodb.shaded.fasterxml.jackson.core.JsonFactory b/shaded/src/main/resources/META-INF/services/com.arangodb.shaded.fasterxml.jackson.core.JsonFactory new file mode 100644 index 000000000..89123f5a5 --- /dev/null +++ b/shaded/src/main/resources/META-INF/services/com.arangodb.shaded.fasterxml.jackson.core.JsonFactory @@ -0,0 +1,2 @@ +com.arangodb.shaded.fasterxml.jackson.core.JsonFactory +com.arangodb.shaded.jackson.dataformat.velocypack.VPackFactory diff --git a/spotbugs/spotbugs-exclude.xml b/spotbugs/spotbugs-exclude.xml new file mode 100644 index 000000000..c7e8ee070 --- /dev/null +++ b/spotbugs/spotbugs-exclude.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/java/com/arangodb/ArangoCollection.java b/src/main/java/com/arangodb/ArangoCollection.java deleted file mode 100644 index e4d2339cc..000000000 --- a/src/main/java/com/arangodb/ArangoCollection.java +++ /dev/null @@ -1,991 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.util.Collection; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CollectionPropertiesEntity; -import com.arangodb.entity.CollectionRevisionEntity; -import com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.entity.DocumentDeleteEntity; -import com.arangodb.entity.DocumentImportEntity; -import com.arangodb.entity.DocumentUpdateEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.MultiDocumentEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.InternalArangoCollection; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.model.CollectionPropertiesOptions; -import com.arangodb.model.DocumentCreateOptions; -import com.arangodb.model.DocumentDeleteOptions; -import com.arangodb.model.DocumentExistsOptions; -import com.arangodb.model.DocumentImportOptions; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.DocumentReplaceOptions; -import com.arangodb.model.DocumentUpdateOptions; -import com.arangodb.model.FulltextIndexOptions; -import com.arangodb.model.GeoIndexOptions; -import com.arangodb.model.HashIndexOptions; -import com.arangodb.model.PersistentIndexOptions; -import com.arangodb.model.SkiplistIndexOptions; -import com.arangodb.velocypack.VPackSlice; -import 
com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoCollection - extends InternalArangoCollection { - - private static final Logger LOGGER = LoggerFactory.getLogger(ArangoCollection.class); - - protected ArangoCollection(final ArangoDatabase db, final String name) { - super(db, name); - } - - /** - * Creates a new document from the given document, unless there is already a document with the _key given. If no - * _key is given, a new unique _key is generated automatically. - * - * @see API - * Documentation - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @return information about the document - * @throws ArangoDBException - */ - public DocumentCreateEntity insertDocument(final T value) throws ArangoDBException { - return executor.execute(insertDocumentRequest(value, new DocumentCreateOptions()), - insertDocumentResponseDeserializer(value)); - } - - /** - * Creates a new document from the given document, unless there is already a document with the _key given. If no - * _key is given, a new unique _key is generated automatically. - * - * @see API - * Documentation - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the document - * @throws ArangoDBException - */ - public DocumentCreateEntity insertDocument(final T value, final DocumentCreateOptions options) - throws ArangoDBException { - return executor.execute(insertDocumentRequest(value, options), insertDocumentResponseDeserializer(value)); - } - - /** - * Creates new documents from the given documents, unless there is already a document with the _key given. If no - * _key is given, a new unique _key is generated automatically. 
- * - * @see API - * Documentation - * @param values - * A List of documents (POJO, VPackSlice or String for Json) - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> insertDocuments(final Collection values) - throws ArangoDBException { - final DocumentCreateOptions params = new DocumentCreateOptions(); - return executor.execute(insertDocumentsRequest(values, params), - insertDocumentsResponseDeserializer(values, params)); - } - - /** - * Creates new documents from the given documents, unless there is already a document with the _key given. If no - * _key is given, a new unique _key is generated automatically. - * - * @see API - * Documentation - * @param values - * A List of documents (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> insertDocuments( - final Collection values, - final DocumentCreateOptions options) throws ArangoDBException { - final DocumentCreateOptions params = (options != null ? 
options : new DocumentCreateOptions()); - return executor.execute(insertDocumentsRequest(values, params), - insertDocumentsResponseDeserializer(values, params)); - } - - /** - * Imports documents - * - * @param values - * a list of Objects that will be stored as documents - * @return information about the import - * @throws ArangoDBException - */ - public DocumentImportEntity importDocuments(final Collection values) throws ArangoDBException { - return importDocuments(values, new DocumentImportOptions()); - } - - /** - * Imports documents - * - * @param values - * a list of Objects that will be stored as documents - * @param options - * Additional options, can be null - * @return information about the import - * @throws ArangoDBException - */ - public DocumentImportEntity importDocuments(final Collection values, final DocumentImportOptions options) - throws ArangoDBException { - return executor.execute(importDocumentsRequest(values, options), DocumentImportEntity.class); - } - - /** - * Imports documents - * - * @param values - * JSON-encoded array of objects that will be stored as documents - * @return information about the import - * @throws ArangoDBException - */ - public DocumentImportEntity importDocuments(final String values) throws ArangoDBException { - return executor.execute(importDocumentsRequest(values, new DocumentImportOptions()), - DocumentImportEntity.class); - } - - /** - * Imports documents - * - * @param values - * JSON-encoded array of objects that will be stored as documents - * @param options - * Additional options, can be null - * @return information about the import - * @throws ArangoDBException - */ - public DocumentImportEntity importDocuments(final String values, final DocumentImportOptions options) - throws ArangoDBException { - return executor.execute(importDocumentsRequest(values, options), DocumentImportEntity.class); - } - - /** - * Reads a single document - * - * @see API - * Documentation - * @param key - * The key of the document - 
* @param type - * The type of the document (POJO class, VPackSlice or String for Json) - * @return the document identified by the key - * @throws ArangoDBException - */ - public T getDocument(final String key, final Class type) throws ArangoDBException { - return getDocument(key, type, new DocumentReadOptions()); - } - - /** - * Reads a single document - * - * @see API - * Documentation - * @param key - * The key of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return the document identified by the key - * @throws ArangoDBException - */ - public T getDocument(final String key, final Class type, final DocumentReadOptions options) - throws ArangoDBException { - executor.validateDocumentKey(key); - try { - return executor.execute(getDocumentRequest(key, options), type); - } catch (final ArangoDBException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(e.getMessage(), e); - } - if ((e.getResponseCode() != null && (e.getResponseCode().intValue() == 404 - || e.getResponseCode().intValue() == 304 || e.getResponseCode().intValue() == 412)) - && (options == null || options.isCatchException())) { - return null; - } - throw e; - } - } - - /** - * Reads multiple documents - * - * @param keys - * The keys of the documents - * @param type - * The type of the documents (POJO class, VPackSlice or String for Json) - * @return the documents and possible errors - * @throws ArangoDBException - */ - public MultiDocumentEntity getDocuments(final Collection keys, final Class type) - throws ArangoDBException { - final DocumentReadOptions options = new DocumentReadOptions(); - return executor.execute(getDocumentsRequest(keys, options), getDocumentsResponseDeserializer(type, options)); - } - - /** - * Replaces the document with key with the one in the body, provided there is such a document and no precondition is - * violated - * - * @see API - * Documentation - * @param 
key - * The key of the document - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @return information about the document - * @throws ArangoDBException - */ - public DocumentUpdateEntity replaceDocument(final String key, final T value) throws ArangoDBException { - return executor.execute(replaceDocumentRequest(key, value, new DocumentReplaceOptions()), - replaceDocumentResponseDeserializer(value)); - } - - /** - * Replaces the document with key with the one in the body, provided there is such a document and no precondition is - * violated - * - * @see API - * Documentation - * @param key - * The key of the document - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the document - * @throws ArangoDBException - */ - public DocumentUpdateEntity replaceDocument( - final String key, - final T value, - final DocumentReplaceOptions options) throws ArangoDBException { - return executor.execute(replaceDocumentRequest(key, value, options), - replaceDocumentResponseDeserializer(value)); - } - - /** - * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are - * specified by the _key attributes in the documents in values. 
- * - * @see API - * Documentation - * @param values - * A List of documents (POJO, VPackSlice or String for Json) - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> replaceDocuments(final Collection values) - throws ArangoDBException { - final DocumentReplaceOptions params = new DocumentReplaceOptions(); - return executor.execute(replaceDocumentsRequest(values, params), - replaceDocumentsResponseDeserializer(values, params)); - } - - /** - * Replaces multiple documents in the specified collection with the ones in the values, the replaced documents are - * specified by the _key attributes in the documents in values. - * - * @see API - * Documentation - * @param values - * A List of documents (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> replaceDocuments( - final Collection values, - final DocumentReplaceOptions options) throws ArangoDBException { - final DocumentReplaceOptions params = (options != null ? options : new DocumentReplaceOptions()); - return executor.execute(replaceDocumentsRequest(values, params), - replaceDocumentsResponseDeserializer(values, params)); - } - - /** - * Partially updates the document identified by document-key. The value must contain a document with the attributes - * to patch (the patch document). All attributes from the patch document will be added to the existing document if - * they do not yet exist, and overwritten in the existing document if they do exist there. 
- * - * @see API - * Documentation - * @param key - * The key of the document - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @return information about the document - * @throws ArangoDBException - */ - public DocumentUpdateEntity updateDocument(final String key, final T value) throws ArangoDBException { - return executor.execute(updateDocumentRequest(key, value, new DocumentUpdateOptions()), - updateDocumentResponseDeserializer(value)); - } - - /** - * Partially updates the document identified by document-key. The value must contain a document with the attributes - * to patch (the patch document). All attributes from the patch document will be added to the existing document if - * they do not yet exist, and overwritten in the existing document if they do exist there. - * - * @see API - * Documentation - * @param key - * The key of the document - * @param value - * A representation of a single document (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the document - * @throws ArangoDBException - */ - public DocumentUpdateEntity updateDocument( - final String key, - final T value, - final DocumentUpdateOptions options) throws ArangoDBException { - return executor.execute(updateDocumentRequest(key, value, options), updateDocumentResponseDeserializer(value)); - } - - /** - * Partially updates documents, the documents to update are specified by the _key attributes in the objects on - * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All - * attributes from the patch documents will be added to the existing documents if they do not yet exist, and - * overwritten in the existing documents if they do exist there. 
- * - * @see API - * Documentation - * @param values - * A list of documents (POJO, VPackSlice or String for Json) - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> updateDocuments(final Collection values) - throws ArangoDBException { - final DocumentUpdateOptions params = new DocumentUpdateOptions(); - return executor.execute(updateDocumentsRequest(values, params), - updateDocumentsResponseDeserializer(values, params)); - } - - /** - * Partially updates documents, the documents to update are specified by the _key attributes in the objects on - * values. Vales must contain a list of document updates with the attributes to patch (the patch documents). All - * attributes from the patch documents will be added to the existing documents if they do not yet exist, and - * overwritten in the existing documents if they do exist there. - * - * @see API - * Documentation - * @param values - * A list of documents (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> updateDocuments( - final Collection values, - final DocumentUpdateOptions options) throws ArangoDBException { - final DocumentUpdateOptions params = (options != null ? options : new DocumentUpdateOptions()); - return executor.execute(updateDocumentsRequest(values, params), - updateDocumentsResponseDeserializer(values, params)); - } - - /** - * Removes a document - * - * @see API - * Documentation - * @param key - * The key of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for Json). Only necessary if - * options.returnOld is set to true, otherwise can be null. 
- * @param options - * Additional options, can be null - * @return information about the document - * @throws ArangoDBException - */ - public DocumentDeleteEntity deleteDocument(final String key) throws ArangoDBException { - return executor.execute(deleteDocumentRequest(key, new DocumentDeleteOptions()), - deleteDocumentResponseDeserializer(Void.class)); - } - - /** - * Removes a document - * - * @see API - * Documentation - * @param key - * The key of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for Json). Only necessary if - * options.returnOld is set to true, otherwise can be null. - * @param options - * Additional options, can be null - * @return information about the document - * @throws ArangoDBException - */ - public DocumentDeleteEntity deleteDocument( - final String key, - final Class type, - final DocumentDeleteOptions options) throws ArangoDBException { - return executor.execute(deleteDocumentRequest(key, options), deleteDocumentResponseDeserializer(type)); - } - - /** - * Removes multiple document - * - * @see API - * Documentation - * @param values - * The keys of the documents or the documents themselves - * @param type - * The type of the documents (POJO class, VPackSlice or String for Json). Only necessary if - * options.returnOld is set to true, otherwise can be null. - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> deleteDocuments(final Collection values) - throws ArangoDBException { - return executor.execute(deleteDocumentsRequest(values, new DocumentDeleteOptions()), - deleteDocumentsResponseDeserializer(Void.class)); - } - - /** - * Removes multiple document - * - * @see API - * Documentation - * @param values - * The keys of the documents or the documents themselves - * @param type - * The type of the documents (POJO class, VPackSlice or String for Json). Only necessary if - * options.returnOld is set to true, otherwise can be null. 
- * @param options - * Additional options, can be null - * @return information about the documents - * @throws ArangoDBException - */ - public MultiDocumentEntity> deleteDocuments( - final Collection values, - final Class type, - final DocumentDeleteOptions options) throws ArangoDBException { - return executor.execute(deleteDocumentsRequest(values, options), deleteDocumentsResponseDeserializer(type)); - } - - /** - * Checks if the document exists by reading a single document head - * - * @see API - * Documentation - * @param key - * The key of the document - * @return true if the document was found, otherwise false - */ - public Boolean documentExists(final String key) { - return documentExists(key, new DocumentExistsOptions()); - } - - /** - * Checks if the document exists by reading a single document head - * - * @see API - * Documentation - * @param key - * The key of the document - * @param options - * Additional options, can be null - * @return true if the document was found, otherwise false - * @throws ArangoDBException - * only thrown when {@link DocumentExistsOptions#isCatchException()} == false - */ - public Boolean documentExists(final String key, final DocumentExistsOptions options) throws ArangoDBException { - try { - executor.execute(documentExistsRequest(key, options), VPackSlice.class); - return true; - } catch (final ArangoDBException e) { - if ((e.getResponseCode() != null && (e.getResponseCode().intValue() == 404 - || e.getResponseCode().intValue() == 304 || e.getResponseCode().intValue() == 412)) - && (options == null || options.isCatchException())) { - return false; - } - throw e; - } - } - - /** - * Returns an index - * - * @see API Documentation - * @param id - * The index-handle - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity getIndex(final String id) throws ArangoDBException { - return executor.execute(getIndexRequest(id), IndexEntity.class); - } - - /** - * Deletes an index - * - * @see API 
Documentation - * @param id - * The index-handle - * @return the id of the index - * @throws ArangoDBException - */ - public String deleteIndex(final String id) throws ArangoDBException { - return executor.execute(deleteIndexRequest(id), deleteIndexResponseDeserializer()); - } - - /** - * Creates a hash index for the collection if it does not already exist. - * - * @deprecated use {@link #ensureHashIndex(Iterable, HashIndexOptions)} instead - * @see API Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - @Deprecated - public IndexEntity createHashIndex(final Collection fields, final HashIndexOptions options) - throws ArangoDBException { - return executor.execute(createHashIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a hash index for the collection if it does not already exist. - * - * @see API Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity ensureHashIndex(final Iterable fields, final HashIndexOptions options) - throws ArangoDBException { - return executor.execute(createHashIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a skip-list index for the collection, if it does not already exist. 
- * - * @deprecated use {@link #ensureSkiplistIndex(Collection, SkiplistIndexOptions)} instead - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - @Deprecated - public IndexEntity createSkiplistIndex(final Collection fields, final SkiplistIndexOptions options) - throws ArangoDBException { - return executor.execute(createSkiplistIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a skip-list index for the collection, if it does not already exist. - * - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity ensureSkiplistIndex(final Iterable fields, final SkiplistIndexOptions options) - throws ArangoDBException { - return executor.execute(createSkiplistIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a persistent index for the collection, if it does not already exist. - * - * @deprecated use {@link #ensurePersistentIndex(Collection, PersistentIndexOptions)} instead - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - @Deprecated - public IndexEntity createPersistentIndex(final Collection fields, final PersistentIndexOptions options) - throws ArangoDBException { - return executor.execute(createPersistentIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a persistent index for the collection, if it does not already exist. 
- * - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity ensurePersistentIndex(final Iterable fields, final PersistentIndexOptions options) - throws ArangoDBException { - return executor.execute(createPersistentIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a geo-spatial index for the collection, if it does not already exist. - * - * @deprecated use {@link #ensureGeoIndex(Collection, GeoIndexOptions)} instead - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - @Deprecated - public IndexEntity createGeoIndex(final Collection fields, final GeoIndexOptions options) - throws ArangoDBException { - return executor.execute(createGeoIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a geo-spatial index for the collection, if it does not already exist. - * - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity ensureGeoIndex(final Iterable fields, final GeoIndexOptions options) - throws ArangoDBException { - return executor.execute(createGeoIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a fulltext index for the collection, if it does not already exist. 
- * - * @deprecated use {@link #ensureFulltextIndex(Collection, FulltextIndexOptions)} instead - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - @Deprecated - public IndexEntity createFulltextIndex(final Collection fields, final FulltextIndexOptions options) - throws ArangoDBException { - return executor.execute(createFulltextIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Creates a fulltext index for the collection, if it does not already exist. - * - * @see API - * Documentation - * @param fields - * A list of attribute paths - * @param options - * Additional options, can be null - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity ensureFulltextIndex(final Iterable fields, final FulltextIndexOptions options) - throws ArangoDBException { - return executor.execute(createFulltextIndexRequest(fields, options), IndexEntity.class); - } - - /** - * Returns all indexes of the collection - * - * @see API - * Documentation - * @return information about the indexes - * @throws ArangoDBException - */ - public Collection getIndexes() throws ArangoDBException { - return executor.execute(getIndexesRequest(), getIndexesResponseDeserializer()); - } - - /** - * Checks whether the collection exists - * - * @return true if the collection exists, otherwise false - */ - public boolean exists() throws ArangoDBException { - try { - getInfo(); - return true; - } catch (final ArangoDBException e) { - return false; - } - } - - /** - * Removes all documents from the collection, but leaves the indexes intact - * - * @see API - * Documentation - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity truncate() throws ArangoDBException { - return executor.execute(truncateRequest(), CollectionEntity.class); - } - - /** - * 
Counts the documents in a collection - * - * @see API - * Documentation - * @return information about the collection, including the number of documents - * @throws ArangoDBException - */ - public CollectionPropertiesEntity count() throws ArangoDBException { - return executor.execute(countRequest(), CollectionPropertiesEntity.class); - } - - /** - * Drops the collection - * - * @see API - * Documentation - * @throws ArangoDBException - */ - public void drop() throws ArangoDBException { - executor.execute(dropRequest(null), Void.class); - } - - /** - * Drops the collection - * - * @see API - * Documentation - * @param isSystem - * Whether or not the collection to drop is a system collection. This parameter must be set to true in - * order to drop a system collection. - * @since ArangoDB 3.1.0 - * @throws ArangoDBException - */ - public void drop(final boolean isSystem) throws ArangoDBException { - executor.execute(dropRequest(isSystem), Void.class); - } - - /** - * Loads a collection into memory. - * - * @see API - * Documentation - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity load() throws ArangoDBException { - return executor.execute(loadRequest(), CollectionEntity.class); - } - - /** - * Removes a collection from memory. This call does not delete any documents. You can use the collection afterwards; - * in which case it will be loaded into memory, again. 
- * - * @see API - * Documentation - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity unload() throws ArangoDBException { - return executor.execute(unloadRequest(), CollectionEntity.class); - } - - /** - * Returns information about the collection - * - * @see API - * Documentation - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity getInfo() throws ArangoDBException { - return executor.execute(getInfoRequest(), CollectionEntity.class); - } - - /** - * Reads the properties of the specified collection - * - * @see API - * Documentation - * @return properties of the collection - * @throws ArangoDBException - */ - public CollectionPropertiesEntity getProperties() throws ArangoDBException { - return executor.execute(getPropertiesRequest(), CollectionPropertiesEntity.class); - } - - /** - * Changes the properties of a collection - * - * @see API - * Documentation - * @param options - * Additional options, can be null - * @return properties of the collection - * @throws ArangoDBException - */ - public CollectionPropertiesEntity changeProperties(final CollectionPropertiesOptions options) - throws ArangoDBException { - return executor.execute(changePropertiesRequest(options), CollectionPropertiesEntity.class); - } - - /** - * Renames a collection - * - * @see API - * Documentation - * @param newName - * The new name - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity rename(final String newName) throws ArangoDBException { - return executor.execute(renameRequest(newName), CollectionEntity.class); - } - - /** - * Retrieve the collections revision - * - * @see API - * Documentation - * @return information about the collection, including the collections revision - * @throws ArangoDBException - */ - public CollectionRevisionEntity getRevision() throws ArangoDBException { - return executor.execute(getRevisionRequest(), 
CollectionRevisionEntity.class); - } - - /** - * Grants or revoke access to the collection for user user. You need permission to the _system database in order to - * execute this call. - * - * @see API - * Documentation - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @throws ArangoDBException - */ - public void grantAccess(final String user, final Permissions permissions) throws ArangoDBException { - executor.execute(grantAccessRequest(user, permissions), Void.class); - } - - /** - * Revokes access to the collection for user user. You need permission to the _system database in order to execute - * this call. - * - * @see API - * Documentation - * @param user - * The name of the user - * @throws ArangoDBException - */ - public void revokeAccess(final String user) throws ArangoDBException { - executor.execute(grantAccessRequest(user, Permissions.NONE), Void.class); - } - - /** - * Clear the collection access level, revert back to the default access level. 
- * - * @see API - * Documentation - * @param user - * The name of the user - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public void resetAccess(final String user) throws ArangoDBException { - executor.execute(resetAccessRequest(user), Void.class); - } - - /** - * Get the collection access level - * - * @see - * API Documentation - * @param user - * The name of the user - * @return permissions of the user - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public Permissions getPermissions(final String user) throws ArangoDBException { - return executor.execute(getPermissionsRequest(user), getPermissionsResponseDeserialzer()); - } - -} diff --git a/src/main/java/com/arangodb/ArangoCursor.java b/src/main/java/com/arangodb/ArangoCursor.java deleted file mode 100644 index 904397eda..000000000 --- a/src/main/java/com/arangodb/ArangoCursor.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; - -import com.arangodb.entity.CursorEntity; -import com.arangodb.entity.CursorEntity.Extras; -import com.arangodb.entity.CursorEntity.Stats; -import com.arangodb.entity.CursorEntity.Warning; -import com.arangodb.internal.ArangoCursorExecute; -import com.arangodb.internal.ArangoCursorIterator; -import com.arangodb.internal.InternalArangoDatabase; -import com.arangodb.internal.net.HostHandle; - -/** - * @author Mark Vollmary - * - */ -public class ArangoCursor implements Iterable, Iterator, Closeable { - - private final Class type; - protected final ArangoCursorIterator iterator; - private final String id; - private final ArangoCursorExecute execute; - private final HostHandle hostHandle; - - protected ArangoCursor(final InternalArangoDatabase db, final ArangoCursorExecute execute, - final Class type, final CursorEntity result) { - super(); - this.execute = execute; - this.type = type; - hostHandle = new HostHandle(); - iterator = createIterator(this, db, execute, result, hostHandle); - id = result.getId(); - } - - protected ArangoCursorIterator createIterator( - final ArangoCursor cursor, - final InternalArangoDatabase db, - final ArangoCursorExecute execute, - final CursorEntity result, - final HostHandle hostHandle) { - return new ArangoCursorIterator(cursor, execute, db, result, hostHandle); - } - - /** - * @return id of temporary cursor created on the server - */ - public String getId() { - return id; - } - - public Class getType() { - return type; - } - - /** - * @return the total number of result documents available (only available if the query was executed with the count - * attribute set) - */ - public Integer getCount() { - return iterator.getResult().getCount(); - } - - public Stats getStats() { - 
final Extras extra = iterator.getResult().getExtra(); - return extra != null ? extra.getStats() : null; - } - - public Collection getWarnings() { - final Extras extra = iterator.getResult().getExtra(); - return extra != null ? extra.getWarnings() : null; - } - - /** - * @return indicating whether the query result was served from the query cache or not - */ - public boolean isCached() { - final Boolean cached = iterator.getResult().getCached(); - return cached != null && cached.booleanValue(); - } - - @Override - public void close() throws IOException { - if (id != null) { - execute.close(id, hostHandle); - } - } - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public T next() { - return iterator.next(); - } - - public List asListRemaining() { - final List remaining = new ArrayList(); - while (hasNext()) { - remaining.add(next()); - } - return remaining; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator iterator() { - return iterator; - } - -} diff --git a/src/main/java/com/arangodb/ArangoDB.java b/src/main/java/com/arangodb/ArangoDB.java deleted file mode 100644 index 5a9e59fef..000000000 --- a/src/main/java/com/arangodb/ArangoDB.java +++ /dev/null @@ -1,863 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.io.IOException; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import javax.net.ssl.SSLContext; - -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.LoadBalancingStrategy; -import com.arangodb.entity.LogEntity; -import com.arangodb.entity.LogLevelEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.UserEntity; -import com.arangodb.internal.ArangoDBConstants; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.CollectionCache; -import com.arangodb.internal.CollectionCache.DBAccess; -import com.arangodb.internal.DocumentCache; -import com.arangodb.internal.Host; -import com.arangodb.internal.InternalArangoDB; -import com.arangodb.internal.http.HttpCommunication; -import com.arangodb.internal.http.HttpProtocol; -import com.arangodb.internal.net.CommunicationProtocol; -import com.arangodb.internal.net.ExtendedHostResolver; -import com.arangodb.internal.net.FallbackHostHandler; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.internal.net.HostResolver; -import com.arangodb.internal.net.HostResolver.EndpointResolver; -import com.arangodb.internal.net.RandomHostHandler; -import com.arangodb.internal.net.RoundRobinHostHandler; -import com.arangodb.internal.net.SimpleHostResolver; -import com.arangodb.internal.util.ArangoDeserializerImpl; -import com.arangodb.internal.util.ArangoSerializerImpl; -import com.arangodb.internal.util.ArangoUtilImpl; -import com.arangodb.internal.velocypack.VPackDocumentModule; -import 
com.arangodb.internal.velocypack.VPackDriverModule; -import com.arangodb.internal.velocystream.VstCommunicationSync; -import com.arangodb.internal.velocystream.VstProtocol; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.model.LogOptions; -import com.arangodb.model.UserCreateOptions; -import com.arangodb.model.UserUpdateOptions; -import com.arangodb.util.ArangoCursorInitializer; -import com.arangodb.util.ArangoDeserializer; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.VPack; -import com.arangodb.velocypack.VPackAnnotationFieldFilter; -import com.arangodb.velocypack.VPackAnnotationFieldNaming; -import com.arangodb.velocypack.VPackDeserializer; -import com.arangodb.velocypack.VPackInstanceCreator; -import com.arangodb.velocypack.VPackJsonDeserializer; -import com.arangodb.velocypack.VPackJsonSerializer; -import com.arangodb.velocypack.VPackModule; -import com.arangodb.velocypack.VPackParser; -import com.arangodb.velocypack.VPackParserModule; -import com.arangodb.velocypack.VPackSerializer; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDB extends InternalArangoDB { - - public static class Builder { - - private final List hosts; - private Host host; - private Integer timeout; - private String user; - private String password; - private Boolean useSsl; - private SSLContext sslContext; - private Integer chunksize; - private Integer maxConnections; - private final VPack.Builder vpackBuilder; - private final VPackParser.Builder vpackParserBuilder; - private ArangoSerializer serializer; - private ArangoDeserializer deserializer; - private Protocol protocol; - private 
Boolean acquireHostList; - private LoadBalancingStrategy loadBalancingStrategy; - - public Builder() { - super(); - vpackBuilder = new VPack.Builder(); - vpackParserBuilder = new VPackParser.Builder(); - vpackBuilder.registerModule(new VPackDriverModule()); - vpackParserBuilder.registerModule(new VPackDriverModule()); - host = new Host(ArangoDBConstants.DEFAULT_HOST, ArangoDBConstants.DEFAULT_PORT); - hosts = new ArrayList(); - user = ArangoDBConstants.DEFAULT_USER; - loadProperties(ArangoDB.class.getResourceAsStream(DEFAULT_PROPERTY_FILE)); - } - - public Builder loadProperties(final InputStream in) throws ArangoDBException { - if (in != null) { - final Properties properties = new Properties(); - try { - properties.load(in); - loadHosts(properties, this.hosts); - final String host = loadHost(properties, this.host.getHost()); - final int port = loadPort(properties, this.host.getPort()); - this.host = new Host(host, port); - timeout = loadTimeout(properties, timeout); - user = loadUser(properties, user); - password = loadPassword(properties, password); - useSsl = loadUseSsl(properties, useSsl); - chunksize = loadChunkSize(properties, chunksize); - maxConnections = loadMaxConnections(properties, maxConnections); - protocol = loadProtocol(properties, protocol); - acquireHostList = loadAcquireHostList(properties, acquireHostList); - loadBalancingStrategy = loadLoadBalancingStrategy(properties, loadBalancingStrategy); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - return this; - } - - /** - * @deprecated will be removed in version 4.2.0 use {@link #host(String, int)} instead - * - * @param host - * @return - */ - @Deprecated - public Builder host(final String host) { - this.host = new Host(host, this.host.getPort()); - return this; - } - - /** - * @deprecated will be removed in version 4.2.0 use {@link #host(String, int)} instead - * - * @param port - * @return - */ - @Deprecated - public Builder port(final Integer port) { - host = new 
Host(host.getHost(), port); - return this; - } - - /** - * Adds a host to connect to. Multiple hosts can be added to provide fallbacks. - * - * @param host - * address of the host - * @param port - * port of the host - * @return {@link ArangoDB.Builder} - */ - public Builder host(final String host, final int port) { - hosts.add(new Host(host, port)); - return this; - } - - public Builder timeout(final Integer timeout) { - this.timeout = timeout; - return this; - } - - public Builder user(final String user) { - this.user = user; - return this; - } - - public Builder password(final String password) { - this.password = password; - return this; - } - - public Builder useSsl(final Boolean useSsl) { - this.useSsl = useSsl; - return this; - } - - public Builder sslContext(final SSLContext sslContext) { - this.sslContext = sslContext; - return this; - } - - public Builder chunksize(final Integer chunksize) { - this.chunksize = chunksize; - return this; - } - - public Builder maxConnections(final Integer maxConnections) { - this.maxConnections = maxConnections; - return this; - } - - public Builder useProtocol(final Protocol protocol) { - this.protocol = protocol; - return this; - } - - public Builder acquireHostList(final Boolean acquireHostList) { - this.acquireHostList = acquireHostList; - return this; - } - - public Builder loadBalancingStrategy(final LoadBalancingStrategy loadBalancingStrategy) { - this.loadBalancingStrategy = loadBalancingStrategy; - return this; - } - - public Builder registerSerializer(final Class clazz, final VPackSerializer serializer) { - vpackBuilder.registerSerializer(clazz, serializer); - return this; - } - - /** - * Register a special serializer for a member class which can only be identified by its enclosing class. 
- * - * @param clazz - * type of the enclosing class - * @param serializer - * serializer to register - * @return builder - */ - public Builder registerEnclosingSerializer(final Class clazz, final VPackSerializer serializer) { - vpackBuilder.registerEnclosingSerializer(clazz, serializer); - return this; - } - - public Builder registerDeserializer(final Class clazz, final VPackDeserializer deserializer) { - vpackBuilder.registerDeserializer(clazz, deserializer); - return this; - } - - public Builder registerInstanceCreator(final Class clazz, final VPackInstanceCreator creator) { - vpackBuilder.registerInstanceCreator(clazz, creator); - return this; - } - - public Builder registerJsonDeserializer(final ValueType type, final VPackJsonDeserializer deserializer) { - vpackParserBuilder.registerDeserializer(type, deserializer); - return this; - } - - public Builder registerJsonDeserializer( - final String attribute, - final ValueType type, - final VPackJsonDeserializer deserializer) { - vpackParserBuilder.registerDeserializer(attribute, type, deserializer); - return this; - } - - public Builder registerJsonSerializer(final Class clazz, final VPackJsonSerializer serializer) { - vpackParserBuilder.registerSerializer(clazz, serializer); - return this; - } - - public Builder registerJsonSerializer( - final String attribute, - final Class clazz, - final VPackJsonSerializer serializer) { - vpackParserBuilder.registerSerializer(attribute, clazz, serializer); - return this; - } - - public Builder annotationFieldFilter( - final Class type, - final VPackAnnotationFieldFilter fieldFilter) { - vpackBuilder.annotationFieldFilter(type, fieldFilter); - return this; - } - - public Builder annotationFieldNaming( - final Class type, - final VPackAnnotationFieldNaming fieldNaming) { - vpackBuilder.annotationFieldNaming(type, fieldNaming); - return this; - } - - public Builder registerModule(final VPackModule module) { - vpackBuilder.registerModule(module); - return this; - } - - public 
Builder registerModules(final VPackModule... modules) { - vpackBuilder.registerModules(modules); - return this; - } - - public Builder registerJsonModule(final VPackParserModule module) { - vpackParserBuilder.registerModule(module); - return this; - } - - public Builder registerJsonModules(final VPackParserModule... module) { - vpackParserBuilder.registerModules(module); - return this; - } - - /** - * Replace the built-in serializer with the given serializer. - * - *
- * ATTENTION!: Use at your own risk - * - * @param serializer - * custom serializer - * @return builder - */ - public Builder setSerializer(final ArangoSerializer serializer) { - this.serializer = serializer; - return this; - } - - /** - * Replace the built-in deserializer with the given deserializer. - * - *
- * ATTENTION!: Use at your own risk - * - * @param deserializer - * custom deserializer - * @return builder - */ - public Builder setDeserializer(final ArangoDeserializer deserializer) { - this.deserializer = deserializer; - return this; - } - - public synchronized ArangoDB build() { - if (hosts.isEmpty()) { - hosts.add(host); - } - final CollectionCache collectionCache = new CollectionCache(); - vpackBuilder.registerModule(new VPackDocumentModule(collectionCache)); - vpackParserBuilder.registerModule(new VPackDocumentModule(collectionCache)); - - final VPack vpacker = vpackBuilder.serializeNullValues(false).build(); - final VPack vpackerNull = vpackBuilder.serializeNullValues(true).build(); - final VPackParser vpackParser = vpackParserBuilder.build(); - final ArangoSerializer serializerTemp = serializer != null ? serializer - : new ArangoSerializerImpl(vpacker, vpackerNull, vpackParser); - final ArangoDeserializer deserializerTemp = deserializer != null ? deserializer - : new ArangoDeserializerImpl(vpackerNull, vpackParser); - - final HostResolver hostResolver = createHostResolver(); - final HostHandler hostHandler = createHostHandler(hostResolver); - return new ArangoDB( - new VstCommunicationSync.Builder(hostHandler).timeout(timeout).user(user).password(password) - .useSsl(useSsl).sslContext(sslContext).chunksize(chunksize).maxConnections(maxConnections), - new HttpCommunication.Builder(hostHandler, protocol).timeout(timeout).user(user).password(password) - .useSsl(useSsl).sslContext(sslContext).maxConnections(maxConnections), - new ArangoUtilImpl(serializerTemp, deserializerTemp), collectionCache, protocol, hostResolver); - } - - private HostResolver createHostResolver() { - return acquireHostList != null && acquireHostList.booleanValue() - ? 
new ExtendedHostResolver(new ArrayList(hosts)) - : new SimpleHostResolver(new ArrayList(hosts)); - } - - private HostHandler createHostHandler(final HostResolver hostResolver) { - final HostHandler hostHandler; - if (loadBalancingStrategy != null) { - switch (loadBalancingStrategy) { - case ONE_RANDOM: - hostHandler = new RandomHostHandler(hostResolver, new FallbackHostHandler(hostResolver)); - break; - case ROUND_ROBIN: - hostHandler = new RoundRobinHostHandler(hostResolver); - break; - case NONE: - default: - hostHandler = new FallbackHostHandler(hostResolver); - break; - } - } else { - hostHandler = new FallbackHostHandler(hostResolver); - } - return hostHandler; - } - - } - - private ArangoCursorInitializer cursorInitializer; - private CommunicationProtocol cp; - - public ArangoDB(final VstCommunicationSync.Builder vstBuilder, final HttpCommunication.Builder httpBuilder, - final ArangoSerialization util, final CollectionCache collectionCache, final Protocol protocol, - final HostResolver hostResolver) { - super(new ArangoExecutorSync(createProtocol(vstBuilder, httpBuilder, util, collectionCache, protocol), util, - new DocumentCache()), util); - cp = createProtocol(new VstCommunicationSync.Builder(vstBuilder).maxConnections(1), - new HttpCommunication.Builder(httpBuilder).maxConnections(1), util, collectionCache, protocol); - collectionCache.init(new DBAccess() { - @Override - public ArangoDatabase db(final String name) { - return new ArangoDatabase(cp, util, executor.documentCache(), name) - .setCursorInitializer(cursorInitializer); - } - }); - hostResolver.init(new EndpointResolver() { - @Override - public Collection resolve(final boolean closeConnections) throws ArangoDBException { - Collection response; - try { - response = executor.execute( - new Request(ArangoDBConstants.SYSTEM, RequestType.GET, ArangoDBConstants.PATH_ENDPOINTS), - new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - 
final VPackSlice field = response.getBody().get(ArangoDBConstants.ENDPOINTS); - Collection endpoints; - if (field.isNone()) { - endpoints = Collections. emptyList(); - } else { - final Collection> tmp = util().deserialize(field, - Collection.class); - endpoints = new ArrayList(); - for (final Map map : tmp) { - for (final String value : map.values()) { - endpoints.add(value); - } - } - } - return endpoints; - } - }, null); - } catch (final ArangoDBException e) { - final Integer responseCode = e.getResponseCode(); - if (responseCode != null && responseCode == 403) { - response = Collections. emptyList(); - } else { - throw e; - } - } finally { - if (closeConnections) { - ArangoDB.this.shutdown(); - } - } - return response; - } - }); - } - - private static CommunicationProtocol createProtocol( - final VstCommunicationSync.Builder vstBuilder, - final HttpCommunication.Builder httpBuilder, - final ArangoSerialization util, - final CollectionCache collectionCache, - final Protocol protocol) { - return (protocol == null || Protocol.VST == protocol) ? 
createVST(vstBuilder, util, collectionCache) - : createHTTP(httpBuilder, util); - } - - private static CommunicationProtocol createVST( - final VstCommunicationSync.Builder builder, - final ArangoSerialization util, - final CollectionCache collectionCache) { - return new VstProtocol(builder.build(util, collectionCache)); - } - - private static CommunicationProtocol createHTTP( - final HttpCommunication.Builder builder, - final ArangoSerialization util) { - return new HttpProtocol(builder.build(util)); - } - - @Override - protected ArangoExecutorSync executor() { - return executor; - } - - public void shutdown() throws ArangoDBException { - try { - executor.disconnect(); - cp.close(); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - - /** - * Returns a handler of the system database - * - * @return database handler - */ - public ArangoDatabase db() { - return db(ArangoDBConstants.SYSTEM); - } - - /** - * Returns a handler of the database by the given name - * - * @param name - * Name of the database - * @return database handler - */ - public ArangoDatabase db(final String name) { - return new ArangoDatabase(this, name).setCursorInitializer(cursorInitializer); - } - - /** - * Creates a new database - * - * @see
API - * Documentation - * @param name - * Has to contain a valid database name - * @return true if the database was created successfully. - * @throws ArangoDBException - */ - public Boolean createDatabase(final String name) throws ArangoDBException { - return executor.execute(createDatabaseRequest(name), createDatabaseResponseDeserializer()); - } - - /** - * Retrieves a list of all existing databases - * - * @see API - * Documentation - * @return a list of all existing databases - * @throws ArangoDBException - */ - public Collection getDatabases() throws ArangoDBException { - return executor.execute(getDatabasesRequest(db().name()), getDatabaseResponseDeserializer()); - } - - /** - * Retrieves a list of all databases the current user can access - * - * @see API - * Documentation - * @return a list of all databases the current user can access - * @throws ArangoDBException - */ - public Collection getAccessibleDatabases() throws ArangoDBException { - return db().getAccessibleDatabases(); - } - - /** - * List available database to the specified user - * - * @see API - * Documentation - * @param user - * The name of the user for which you want to query the databases - * @return - * @throws ArangoDBException - */ - public Collection getAccessibleDatabasesFor(final String user) throws ArangoDBException { - return executor.execute(getAccessibleDatabasesForRequest(db().name(), user), - getAccessibleDatabasesForResponseDeserializer()); - } - - /** - * Returns the server name and version number. - * - * @see API - * Documentation - * @return the server version, number - * @throws ArangoDBException - */ - public ArangoDBVersion getVersion() throws ArangoDBException { - return db().getVersion(); - } - - /** - * Returns the server role. - * - * @return the server role - * @throws ArangoDBException - */ - public ServerRole getRole() throws ArangoDBException { - return executor.execute(getRoleRequest(), getRoleResponseDeserializer()); - } - - /** - * Create a new user. 
This user will not have access to any database. You need permission to the _system database in - * order to execute this call. - * - * @see API Documentation - * @param user - * The name of the user - * @param passwd - * The user password - * @return information about the user - * @throws ArangoDBException - */ - public UserEntity createUser(final String user, final String passwd) throws ArangoDBException { - return executor.execute(createUserRequest(db().name(), user, passwd, new UserCreateOptions()), - UserEntity.class); - } - - /** - * Create a new user. This user will not have access to any database. You need permission to the _system database in - * order to execute this call. - * - * @see API Documentation - * @param user - * The name of the user - * @param passwd - * The user password - * @param options - * Additional options, can be null - * @return information about the user - * @throws ArangoDBException - */ - public UserEntity createUser(final String user, final String passwd, final UserCreateOptions options) - throws ArangoDBException { - return executor.execute(createUserRequest(db().name(), user, passwd, options), UserEntity.class); - } - - /** - * Removes an existing user, identified by user. You need access to the _system database. - * - * @see API Documentation - * @param user - * The name of the user - * @throws ArangoDBException - */ - public void deleteUser(final String user) throws ArangoDBException { - executor.execute(deleteUserRequest(db().name(), user), Void.class); - } - - /** - * Fetches data about the specified user. You can fetch information about yourself or you need permission to the - * _system database in order to execute this call. 
- * - * @see API Documentation - * @param user - * The name of the user - * @return information about the user - * @throws ArangoDBException - */ - public UserEntity getUser(final String user) throws ArangoDBException { - return executor.execute(getUserRequest(db().name(), user), UserEntity.class); - } - - /** - * Fetches data about all users. You can only execute this call if you have access to the _system database. - * - * @see API - * Documentation - * @return informations about all users - * @throws ArangoDBException - */ - public Collection getUsers() throws ArangoDBException { - return executor.execute(getUsersRequest(db().name()), getUsersResponseDeserializer()); - } - - /** - * Partially updates the data of an existing user. The name of an existing user must be specified in user. You can - * only change the password of your self. You need access to the _system database to change the active flag. - * - * @see API Documentation - * @param user - * The name of the user - * @param options - * Properties of the user to be changed - * @return information about the user - * @throws ArangoDBException - */ - public UserEntity updateUser(final String user, final UserUpdateOptions options) throws ArangoDBException { - return executor.execute(updateUserRequest(db().name(), user, options), UserEntity.class); - } - - /** - * Replaces the data of an existing user. The name of an existing user must be specified in user. You can only - * change the password of your self. You need access to the _system database to change the active flag. 
- * - * @see API - * Documentation - * @param user - * The name of the user - * @param options - * Additional properties of the user, can be null - * @return information about the user - * @throws ArangoDBException - */ - public UserEntity replaceUser(final String user, final UserUpdateOptions options) throws ArangoDBException { - return executor.execute(replaceUserRequest(db().name(), user, options), UserEntity.class); - } - - /** - * @deprecated use {@link #grantDefaultDatabaseAccess(String, Permissions)} instead - * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - @Deprecated - public void updateUserDefaultDatabaseAccess(final String user, final Permissions permissions) - throws ArangoDBException { - executor.execute(updateUserDefaultDatabaseAccessRequest(user, permissions), Void.class); - } - - /** - * Sets the default access level for databases for the user user. You need permission to the _system - * database in order to execute this call. 
- * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public void grantDefaultDatabaseAccess(final String user, final Permissions permissions) throws ArangoDBException { - executor.execute(updateUserDefaultDatabaseAccessRequest(user, permissions), Void.class); - } - - /** - * @deprecated user {@link #grantDefaultCollectionAccess(String, Permissions)} instead - * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - @Deprecated - public void updateUserDefaultCollectionAccess(final String user, final Permissions permissions) - throws ArangoDBException { - executor.execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); - } - - /** - * Sets the default access level for collections for the user user. You need permission to the _system - * database in order to execute this call. - * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public void grantDefaultCollectionAccess(final String user, final Permissions permissions) - throws ArangoDBException { - executor.execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); - } - - /** - * Generic Execute. Use this method to execute custom FOXX services. - * - * @param request - * VelocyStream request - * @return VelocyStream response - * @throws ArangoDBException - */ - public Response execute(final Request request) throws ArangoDBException { - return executor.execute(request, new ResponseDeserializer() { - @Override - public Response deserialize(final Response response) throws VPackException { - return response; - } - }); - } - - /** - * Generic Execute. Use this method to execute custom FOXX services. 
- * - * @param request - * VelocyStream request - * @param hostHandle - * Used to stick to a specific host when using {@link LoadBalancingStrategy#ROUND_ROBIN} - * @return VelocyStream response - * @throws ArangoDBException - */ - public Response execute(final Request request, final HostHandle hostHandle) throws ArangoDBException { - return executor.execute(request, new ResponseDeserializer() { - @Override - public Response deserialize(final Response response) throws VPackException { - return response; - } - }, hostHandle); - } - - /** - * Returns fatal, error, warning or info log messages from the server's global log. - * - * @see API - * Documentation - * @param options - * Additional options, can be null - * @return the log messages - * @throws ArangoDBException - */ - public LogEntity getLogs(final LogOptions options) throws ArangoDBException { - return executor.execute(getLogsRequest(options), LogEntity.class); - } - - /** - * Returns the server's current loglevel settings. - * - * @return the server's current loglevel settings - * @since ArangoDB 3.1.0 - * @throws ArangoDBException - */ - public LogLevelEntity getLogLevel() throws ArangoDBException { - return executor.execute(getLogLevelRequest(), LogLevelEntity.class); - } - - /** - * Modifies and returns the server's current loglevel settings. 
- * - * @param entity - * loglevel settings - * @return the server's current loglevel settings - * @since ArangoDB 3.1.0 - * @throws ArangoDBException - */ - public LogLevelEntity setLogLevel(final LogLevelEntity entity) throws ArangoDBException { - return executor.execute(setLogLevelRequest(entity), LogLevelEntity.class); - } - - public ArangoDB _setCursorInitializer(final ArangoCursorInitializer cursorInitializer) { - this.cursorInitializer = cursorInitializer; - return this; - } -} diff --git a/src/main/java/com/arangodb/ArangoDBException.java b/src/main/java/com/arangodb/ArangoDBException.java deleted file mode 100644 index 24ba51cb4..000000000 --- a/src/main/java/com/arangodb/ArangoDBException.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import com.arangodb.entity.ErrorEntity; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDBException extends RuntimeException { - - private static final long serialVersionUID = 6165638002614173801L; - private ErrorEntity entity = null; - private Integer responseCode; - - public ArangoDBException(final ErrorEntity errorEntity) { - super(String.format("Response: %s, Error: %s - %s", errorEntity.getCode(), errorEntity.getErrorNum(), - errorEntity.getErrorMessage())); - this.entity = errorEntity; - } - - public ArangoDBException(final String message) { - super(message); - } - - public ArangoDBException(final String message, final Integer responseCode) { - super(message); - this.responseCode = responseCode; - } - - public ArangoDBException(final Throwable cause) { - super(cause); - } - - public String getErrorMessage() { - return entity != null ? entity.getErrorMessage() : null; - } - - public String getException() { - return entity != null ? entity.getException() : null; - } - - public Integer getResponseCode() { - return responseCode != null ? responseCode : entity != null ? entity.getCode() : null; - } - - public Integer getErrorNum() { - return entity != null ? entity.getErrorNum() : null; - } - -} diff --git a/src/main/java/com/arangodb/ArangoDatabase.java b/src/main/java/com/arangodb/ArangoDatabase.java deleted file mode 100644 index 31971bbf6..000000000 --- a/src/main/java/com/arangodb/ArangoDatabase.java +++ /dev/null @@ -1,780 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.util.Collection; -import java.util.Map; - -import com.arangodb.entity.AqlExecutionExplainEntity; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.AqlParseEntity; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CursorEntity; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.internal.ArangoCursorExecute; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.DocumentCache; -import com.arangodb.internal.InternalArangoDatabase; -import com.arangodb.internal.net.CommunicationProtocol; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.model.AqlFunctionCreateOptions; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlFunctionGetOptions; -import com.arangodb.model.AqlQueryExplainOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import 
com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; -import com.arangodb.util.ArangoCursorInitializer; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.Type; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDatabase extends InternalArangoDatabase { - - private ArangoCursorInitializer cursorInitializer; - - protected ArangoDatabase(final ArangoDB arangoDB, final String name) { - super(arangoDB, arangoDB.executor(), arangoDB.util(), name); - } - - protected ArangoDatabase(final CommunicationProtocol protocol, final ArangoSerialization util, - final DocumentCache documentCache, final String name) { - super(null, new ArangoExecutorSync(protocol, util, documentCache), util, name); - } - - /** - * Returns the server name and version number. 
- * - * @see API - * Documentation - * @return the server version, number - * @throws ArangoDBException - */ - public ArangoDBVersion getVersion() throws ArangoDBException { - return executor.execute(getVersionRequest(), ArangoDBVersion.class); - } - - /** - * Checks whether the database exists - * - * @return true if the database exists, otherwise false - */ - public boolean exists() throws ArangoDBException { - try { - getInfo(); - return true; - } catch (final ArangoDBException e) { - return false; - } - } - - /** - * Retrieves a list of all databases the current user can access - * - * @see API - * Documentation - * @return a list of all databases the current user can access - * @throws ArangoDBException - */ - public Collection getAccessibleDatabases() throws ArangoDBException { - return executor.execute(getAccessibleDatabasesRequest(), getDatabaseResponseDeserializer()); - } - - /** - * Returns a handler of the collection by the given name - * - * @param name - * Name of the collection - * @return collection handler - */ - public ArangoCollection collection(final String name) { - return new ArangoCollection(this, name); - } - - /** - * Creates a collection - * - * @see API - * Documentation - * @param name - * The name of the collection - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity createCollection(final String name) throws ArangoDBException { - return executor.execute(createCollectionRequest(name, new CollectionCreateOptions()), CollectionEntity.class); - } - - /** - * Creates a collection - * - * @see API - * Documentation - * @param name - * The name of the collection - * @param options - * Additional options, can be null - * @return information about the collection - * @throws ArangoDBException - */ - public CollectionEntity createCollection(final String name, final CollectionCreateOptions options) - throws ArangoDBException { - return executor.execute(createCollectionRequest(name, options), 
CollectionEntity.class); - } - - /** - * Returns all collections - * - * @see API - * Documentation - * @return list of information about all collections - * @throws ArangoDBException - */ - public Collection getCollections() throws ArangoDBException { - return executor.execute(getCollectionsRequest(new CollectionsReadOptions()), - getCollectionsResponseDeserializer()); - } - - /** - * Returns all collections - * - * @see API - * Documentation - * @param options - * Additional options, can be null - * @return list of information about all collections - * @throws ArangoDBException - */ - public Collection getCollections(final CollectionsReadOptions options) throws ArangoDBException { - return executor.execute(getCollectionsRequest(options), getCollectionsResponseDeserializer()); - } - - /** - * Returns an index - * - * @see API Documentation - * @param id - * The index-handle - * @return information about the index - * @throws ArangoDBException - */ - public IndexEntity getIndex(final String id) throws ArangoDBException { - executor.validateIndexId(id); - final String[] split = id.split("/"); - return collection(split[0]).getIndex(split[1]); - } - - /** - * Deletes an index - * - * @see API Documentation - * @param id - * The index-handle - * @return the id of the index - * @throws ArangoDBException - */ - public String deleteIndex(final String id) throws ArangoDBException { - executor.validateIndexId(id); - final String[] split = id.split("/"); - return collection(split[0]).deleteIndex(split[1]); - } - - /** - * Drop an existing database - * - * @see API - * Documentation - * @return true if the database was dropped successfully - * @throws ArangoDBException - */ - public Boolean drop() throws ArangoDBException { - return executor.execute(dropRequest(), createDropResponseDeserializer()); - } - - /** - * Grants or revoke access to the database for user user. You need permission to the _system database - * in order to execute this call. 
- * - * @see - * API Documentation - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @throws ArangoDBException - */ - public void grantAccess(final String user, final Permissions permissions) throws ArangoDBException { - executor.execute(grantAccessRequest(user, permissions), Void.class); - } - - /** - * Grants access to the database for user user. You need permission to the _system database in order to - * execute this call. - * - * @see - * API Documentation - * @param user - * The name of the user - * @throws ArangoDBException - */ - public void grantAccess(final String user) throws ArangoDBException { - executor.execute(grantAccessRequest(user, Permissions.RW), Void.class); - } - - /** - * Revokes access to the database dbname for user user. You need permission to the _system database in - * order to execute this call. - * - * @see - * API Documentation - * @param user - * The name of the user - * @throws ArangoDBException - */ - public void revokeAccess(final String user) throws ArangoDBException { - executor.execute(grantAccessRequest(user, Permissions.NONE), Void.class); - } - - /** - * Clear the database access level, revert back to the default access level. - * - * @see - * API Documentation - * @param user - * The name of the user - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public void resetAccess(final String user) throws ArangoDBException { - executor.execute(resetAccessRequest(user), Void.class); - } - - /** - * Sets the default access level for collections within this database for the user user. You need - * permission to the _system database in order to execute this call. 
- * - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public void grantDefaultCollectionAccess(final String user, final Permissions permissions) - throws ArangoDBException { - executor.execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); - } - - /** - * @deprecated use {@link #grantDefaultCollectionAccess(String, Permissions)} instead - * @param user - * The name of the user - * @param permissions - * The permissions the user grant - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - @Deprecated - public void updateUserDefaultCollectionAccess(final String user, final Permissions permissions) - throws ArangoDBException { - executor.execute(updateUserDefaultCollectionAccessRequest(user, permissions), Void.class); - } - - /** - * Get specific database access level - * - * @see API - * Documentation - * @param user - * The name of the user - * @return permissions of the user - * @since ArangoDB 3.2.0 - * @throws ArangoDBException - */ - public Permissions getPermissions(final String user) throws ArangoDBException { - return executor.execute(getPermissionsRequest(user), getPermissionsResponseDeserialzer()); - } - - /** - * Create a cursor and return the first results - * - * @see API - * Documentation - * @param query - * contains the query string to be executed - * @param bindVars - * key/value pairs representing the bind parameters - * @param options - * Additional options, can be null - * @param type - * The type of the result (POJO class, VPackSlice, String for Json, or Collection/List/Map) - * @return cursor of the results - * @throws ArangoDBException - */ - public ArangoCursor query( - final String query, - final Map bindVars, - final AqlQueryOptions options, - final Class type) throws ArangoDBException { - final Request request = queryRequest(query, bindVars, options); - final CursorEntity result = 
executor.execute(request, CursorEntity.class); - return createCursor(result, type); - } - - /** - * Return an cursor from the given cursor-ID if still existing - * - * @see API - * Documentation - * @param cursorId - * The ID of the cursor - * @param type - * The type of the result (POJO class, VPackSlice, String for Json, or Collection/List/Map) - * @return cursor of the results - * @throws ArangoDBException - */ - public ArangoCursor cursor(final String cursorId, final Class type) throws ArangoDBException { - final CursorEntity result = executor.execute(queryNextRequest(cursorId), CursorEntity.class); - return createCursor(result, type); - } - - private ArangoCursor createCursor(final CursorEntity result, final Class type) { - final ArangoCursorExecute execute = new ArangoCursorExecute() { - @Override - public CursorEntity next(final String id, final HostHandle hostHandle) { - return executor.execute(queryNextRequest(id), CursorEntity.class, hostHandle); - } - - @Override - public void close(final String id, final HostHandle hostHandle) { - executor.execute(queryCloseRequest(id), Void.class, hostHandle); - } - }; - return cursorInitializer != null ? 
cursorInitializer.createInstance(this, execute, type, result) - : new ArangoCursor(this, execute, type, result); - } - - /** - * Explain an AQL query and return information about it - * - * @see API - * Documentation - * @param query - * the query which you want explained - * @param bindVars - * key/value pairs representing the bind parameters - * @param options - * Additional options, can be null - * @return information about the query - * @throws ArangoDBException - */ - public AqlExecutionExplainEntity explainQuery( - final String query, - final Map bindVars, - final AqlQueryExplainOptions options) throws ArangoDBException { - return executor.execute(explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); - } - - /** - * Parse an AQL query and return information about it This method is for query validation only. To actually query - * the database, see {@link ArangoDatabase#query(String, Map, AqlQueryOptions, Class)} - * - * @see API - * Documentation - * @param query - * the query which you want parse - * @return imformation about the query - * @throws ArangoDBException - */ - public AqlParseEntity parseQuery(final String query) throws ArangoDBException { - return executor.execute(parseQueryRequest(query), AqlParseEntity.class); - } - - /** - * Clears the AQL query cache - * - * @see API - * Documentation - * @throws ArangoDBException - */ - public void clearQueryCache() throws ArangoDBException { - executor.execute(clearQueryCacheRequest(), Void.class); - } - - /** - * Returns the global configuration for the AQL query cache - * - * @see API - * Documentation - * @return configuration for the AQL query cache - * @throws ArangoDBException - */ - public QueryCachePropertiesEntity getQueryCacheProperties() throws ArangoDBException { - return executor.execute(getQueryCachePropertiesRequest(), QueryCachePropertiesEntity.class); - } - - /** - * Changes the configuration for the AQL query cache. 
Note: changing the properties may invalidate all results in - * the cache. - * - * @see API - * Documentation - * @param properties - * properties to be set - * @return current set of properties - * @throws ArangoDBException - */ - public QueryCachePropertiesEntity setQueryCacheProperties(final QueryCachePropertiesEntity properties) - throws ArangoDBException { - return executor.execute(setQueryCachePropertiesRequest(properties), QueryCachePropertiesEntity.class); - } - - /** - * Returns the configuration for the AQL query tracking - * - * @see API - * Documentation - * @return configuration for the AQL query tracking - * @throws ArangoDBException - */ - public QueryTrackingPropertiesEntity getQueryTrackingProperties() throws ArangoDBException { - return executor.execute(getQueryTrackingPropertiesRequest(), QueryTrackingPropertiesEntity.class); - } - - /** - * Changes the configuration for the AQL query tracking - * - * @see API - * Documentation - * @param properties - * properties to be set - * @return current set of properties - * @throws ArangoDBException - */ - public QueryTrackingPropertiesEntity setQueryTrackingProperties(final QueryTrackingPropertiesEntity properties) - throws ArangoDBException { - return executor.execute(setQueryTrackingPropertiesRequest(properties), QueryTrackingPropertiesEntity.class); - } - - /** - * Returns a list of currently running AQL queries - * - * @see API - * Documentation - * @return a list of currently running AQL queries - * @throws ArangoDBException - */ - public Collection getCurrentlyRunningQueries() throws ArangoDBException { - return executor.execute(getCurrentlyRunningQueriesRequest(), new Type>() { - }.getType()); - } - - /** - * Returns a list of slow running AQL queries - * - * @see API - * Documentation - * @return a list of slow running AQL queries - * @throws ArangoDBException - */ - public Collection getSlowQueries() throws ArangoDBException { - return executor.execute(getSlowQueriesRequest(), new Type>() { - 
}.getType()); - } - - /** - * Clears the list of slow AQL queries - * - * @see API - * Documentation - * @throws ArangoDBException - */ - public void clearSlowQueries() throws ArangoDBException { - executor.execute(clearSlowQueriesRequest(), Void.class); - } - - /** - * Kills a running query. The query will be terminated at the next cancelation point. - * - * @see API - * Documentation - * @param id - * The id of the query - * @throws ArangoDBException - */ - public void killQuery(final String id) throws ArangoDBException { - executor.execute(killQueryRequest(id), Void.class); - } - - /** - * Create a new AQL user function - * - * @see API - * Documentation - * @param name - * the fully qualified name of the user functions - * @param code - * a string representation of the function body - * @param options - * Additional options, can be null - * @throws ArangoDBException - */ - public void createAqlFunction(final String name, final String code, final AqlFunctionCreateOptions options) - throws ArangoDBException { - executor.execute(createAqlFunctionRequest(name, code, options), Void.class); - } - - /** - * Remove an existing AQL user function - * - * @see API - * Documentation - * @param name - * the name of the AQL user function - * @param options - * Additional options, can be null - * @throws ArangoDBException - */ - public void deleteAqlFunction(final String name, final AqlFunctionDeleteOptions options) throws ArangoDBException { - executor.execute(deleteAqlFunctionRequest(name, options), Void.class); - } - - /** - * Gets all reqistered AQL user functions - * - * @see API - * Documentation - * @param options - * Additional options, can be null - * @return all reqistered AQL user functions - * @throws ArangoDBException - */ - public Collection getAqlFunctions(final AqlFunctionGetOptions options) throws ArangoDBException { - return executor.execute(getAqlFunctionsRequest(options), new Type>() { - }.getType()); - } - - /** - * Returns a handler of the graph by the 
given name - * - * @param name - * Name of the graph - * @return graph handler - */ - public ArangoGraph graph(final String name) { - return new ArangoGraph(this, name); - } - - /** - * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition - * of its edges. - * - * @see API - * Documentation - * @param name - * Name of the graph - * @param edgeDefinitions - * An array of definitions for the edge - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity createGraph(final String name, final Collection edgeDefinitions) - throws ArangoDBException { - return executor.execute(createGraphRequest(name, edgeDefinitions, new GraphCreateOptions()), - createGraphResponseDeserializer()); - } - - /** - * Create a new graph in the graph module. The creation of a graph requires the name of the graph and a definition - * of its edges. - * - * @see API - * Documentation - * @param name - * Name of the graph - * @param edgeDefinitions - * An array of definitions for the edge - * @param options - * Additional options, can be null - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity createGraph( - final String name, - final Collection edgeDefinitions, - final GraphCreateOptions options) throws ArangoDBException { - return executor.execute(createGraphRequest(name, edgeDefinitions, options), createGraphResponseDeserializer()); - } - - /** - * Lists all graphs known to the graph module - * - * @see API - * Documentation - * @return graphs stored in this database - * @throws ArangoDBException - */ - public Collection getGraphs() throws ArangoDBException { - return executor.execute(getGraphsRequest(), getGraphsResponseDeserializer()); - } - - /** - * Execute a server-side transaction - * - * @see API - * Documentation - * @param action - * the actual transaction operations to be executed, in the form of stringified JavaScript code - * @param type - * The 
type of the result (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return the result of the transaction if it succeeded - * @throws ArangoDBException - */ - public T transaction(final String action, final Class type, final TransactionOptions options) - throws ArangoDBException { - return executor.execute(transactionRequest(action, options), transactionResponseDeserializer(type)); - } - - /** - * Retrieves information about the current database - * - * @see API - * Documentation - * @return information about the current database - * @throws ArangoDBException - */ - public DatabaseEntity getInfo() throws ArangoDBException { - return executor.execute(getInfoRequest(), getInfoResponseDeserializer()); - } - - /** - * Execute a server-side traversal - * - * @see API - * Documentation - * @param vertexClass - * The type of the vertex documents (POJO class, VPackSlice or String for Json) - * @param edgeClass - * The type of the edge documents (POJO class, VPackSlice or String for Json) - * @param options - * Additional options - * @return Result of the executed traversal - * @throws ArangoDBException - */ - public TraversalEntity executeTraversal( - final Class vertexClass, - final Class edgeClass, - final TraversalOptions options) throws ArangoDBException { - final Request request = executeTraversalRequest(options); - return executor.execute(request, executeTraversalResponseDeserializer(vertexClass, edgeClass)); - } - - /** - * Reads a single document - * - * @see API - * Documentation - * @param id - * The id of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for Json) - * @return the document identified by the id - * @throws ArangoDBException - */ - public T getDocument(final String id, final Class type) throws ArangoDBException { - executor.validateDocumentId(id); - final String[] split = id.split("/"); - return collection(split[0]).getDocument(split[1], type); - } - - 
/** - * Reads a single document - * - * @see API - * Documentation - * @param id - * The id of the document - * @param type - * The type of the document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return the document identified by the id - * @throws ArangoDBException - */ - public T getDocument(final String id, final Class type, final DocumentReadOptions options) - throws ArangoDBException { - executor.validateDocumentId(id); - final String[] split = id.split("/"); - return collection(split[0]).getDocument(split[1], type, options); - } - - /** - * Reload the routing table. - * - * @see API - * Documentation - * @throws ArangoDBException - */ - public void reloadRouting() throws ArangoDBException { - executor.execute(reloadRoutingRequest(), Void.class); - } - - protected ArangoDatabase setCursorInitializer(final ArangoCursorInitializer cursorInitializer) { - this.cursorInitializer = cursorInitializer; - return this; - } - -} diff --git a/src/main/java/com/arangodb/ArangoEdgeCollection.java b/src/main/java/com/arangodb/ArangoEdgeCollection.java deleted file mode 100644 index 42c707dbc..000000000 --- a/src/main/java/com/arangodb/ArangoEdgeCollection.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.entity.EdgeEntity; -import com.arangodb.entity.EdgeUpdateEntity; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.InternalArangoEdgeCollection; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.EdgeCreateOptions; -import com.arangodb.model.EdgeDeleteOptions; -import com.arangodb.model.EdgeReplaceOptions; -import com.arangodb.model.EdgeUpdateOptions; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoEdgeCollection extends - InternalArangoEdgeCollection { - - private static final Logger LOGGER = LoggerFactory.getLogger(ArangoEdgeCollection.class); - - protected ArangoEdgeCollection(final ArangoGraph graph, final String name) { - super(graph, name); - } - - /** - * Creates a new edge in the collection - * - * @see API Documentation - * @param value - * A representation of a single edge (POJO, VPackSlice or String for Json) - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeEntity insertEdge(final T value) throws ArangoDBException { - return executor.execute(insertEdgeRequest(value, new EdgeCreateOptions()), - insertEdgeResponseDeserializer(value)); - } - - /** - * Creates a new edge in the collection - * - * @see API Documentation - * @param value - * A representation of a single edge (POJO, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeEntity insertEdge(final T value, final EdgeCreateOptions options) throws ArangoDBException { - return executor.execute(insertEdgeRequest(value, options), insertEdgeResponseDeserializer(value)); - } - - /** - * Fetches an 
existing edge - * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @return the edge identified by the key - * @throws ArangoDBException - */ - public T getEdge(final String key, final Class type) throws ArangoDBException { - try { - return executor.execute(getEdgeRequest(key, new DocumentReadOptions()), getEdgeResponseDeserializer(type)); - } catch (final ArangoDBException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(e.getMessage(), e); - } - return null; - } - } - - /** - * Fetches an existing edge - * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return the edge identified by the key - * @throws ArangoDBException - */ - public T getEdge(final String key, final Class type, final DocumentReadOptions options) - throws ArangoDBException { - try { - return executor.execute(getEdgeRequest(key, options), getEdgeResponseDeserializer(type)); - } catch (final ArangoDBException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(e.getMessage(), e); - } - return null; - } - } - - /** - * Replaces the edge with key with the one in the body, provided there is such a edge and no precondition is - * violated - * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeUpdateEntity replaceEdge(final String key, final T value) throws ArangoDBException { - return executor.execute(replaceEdgeRequest(key, value, new EdgeReplaceOptions()), - replaceEdgeResponseDeserializer(value)); - } - - /** - * Replaces the edge with key with the one in the body, provided there is such a edge and no precondition is - * 
violated - * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeUpdateEntity replaceEdge(final String key, final T value, final EdgeReplaceOptions options) - throws ArangoDBException { - return executor.execute(replaceEdgeRequest(key, value, options), replaceEdgeResponseDeserializer(value)); - } - - /** - * Partially updates the edge identified by document-key. The value must contain a document with the attributes to - * patch (the patch document). All attributes from the patch document will be added to the existing document if they - * do not yet exist, and overwritten in the existing document if they do exist there. - * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeUpdateEntity updateEdge(final String key, final T value) throws ArangoDBException { - return executor.execute(updateEdgeRequest(key, value, new EdgeUpdateOptions()), - updateEdgeResponseDeserializer(value)); - } - - /** - * Partially updates the edge identified by document-key. The value must contain a document with the attributes to - * patch (the patch document). All attributes from the patch document will be added to the existing document if they - * do not yet exist, and overwritten in the existing document if they do exist there. 
- * - * @see API Documentation - * @param key - * The key of the edge - * @param type - * The type of the edge-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the edge - * @throws ArangoDBException - */ - public EdgeUpdateEntity updateEdge(final String key, final T value, final EdgeUpdateOptions options) - throws ArangoDBException { - return executor.execute(updateEdgeRequest(key, value, options), updateEdgeResponseDeserializer(value)); - } - - /** - * Removes a edge - * - * @see API Documentation - * @param key - * The key of the edge - * @throws ArangoDBException - */ - public void deleteEdge(final String key) throws ArangoDBException { - executor.execute(deleteEdgeRequest(key, new EdgeDeleteOptions()), Void.class); - } - - /** - * Removes a edge - * - * @see API Documentation - * @param key - * The key of the edge - * @param options - * Additional options, can be null - * @throws ArangoDBException - */ - public void deleteEdge(final String key, final EdgeDeleteOptions options) throws ArangoDBException { - executor.execute(deleteEdgeRequest(key, options), Void.class); - } - -} diff --git a/src/main/java/com/arangodb/ArangoGraph.java b/src/main/java/com/arangodb/ArangoGraph.java deleted file mode 100644 index 65056991e..000000000 --- a/src/main/java/com/arangodb/ArangoGraph.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.util.Collection; - -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.InternalArangoGraph; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoGraph - extends InternalArangoGraph { - - protected ArangoGraph(final ArangoDatabase db, final String name) { - super(db, name); - } - - /** - * Checks whether the graph exists - * - * @return true if the graph exists, otherwise false - */ - public boolean exists() throws ArangoDBException { - try { - getInfo(); - return true; - } catch (final ArangoDBException e) { - return false; - } - } - - /** - * Delete an existing graph - * - * @see API Documentation - * @throws ArangoDBException - */ - public void drop() throws ArangoDBException { - executor.execute(dropRequest(), Void.class); - } - - /** - * Get a graph from the graph module - * - * @see API Documentation - * @return the definition content of this graph - * @throws ArangoDBException - */ - public GraphEntity getInfo() throws ArangoDBException { - return executor.execute(getInfoRequest(), getInfoResponseDeserializer()); - } - - /** - * Lists all vertex collections used in this graph - * - * @see API - * Documentation - * @return all vertex collections within this graph - * @throws ArangoDBException - */ - public Collection getVertexCollections() throws ArangoDBException { - return executor.execute(getVertexCollectionsRequest(), getVertexCollectionsResponseDeserializer()); - } - - /** - * Adds a vertex collection to the set of collections of the graph. If the collection does not exist, it will be - * created. 
- * - * @see API - * Documentation - * @param name - * The name of the collection - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity addVertexCollection(final String name) throws ArangoDBException { - return executor.execute(addVertexCollectionRequest(name), addVertexCollectionResponseDeserializer()); - } - - /** - * Returns a handler of the vertex collection by the given name - * - * @param name - * Name of the vertex collection - * @return collection handler - */ - public ArangoVertexCollection vertexCollection(final String name) { - return new ArangoVertexCollection(this, name); - } - - /** - * Returns a handler of the edge collection by the given name - * - * @param name - * Name of the edge collection - * @return collection handler - */ - public ArangoEdgeCollection edgeCollection(final String name) { - return new ArangoEdgeCollection(this, name); - } - - /** - * Lists all edge collections used in this graph - * - * @see API - * Documentation - * @return all edge collections within this graph - * @throws ArangoDBException - */ - public Collection getEdgeDefinitions() throws ArangoDBException { - return executor.execute(getEdgeDefinitionsRequest(), getEdgeDefinitionsDeserializer()); - } - - /** - * Add a new edge definition to the graph - * - * @see API - * Documentation - * @param definition - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity addEdgeDefinition(final EdgeDefinition definition) throws ArangoDBException { - return executor.execute(addEdgeDefinitionRequest(definition), addEdgeDefinitionResponseDeserializer()); - } - - /** - * Change one specific edge definition. 
This will modify all occurrences of this definition in all graphs known to - * your database - * - * @see API - * Documentation - * @param definition - * The edge definition - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity replaceEdgeDefinition(final EdgeDefinition definition) throws ArangoDBException { - return executor.execute(replaceEdgeDefinitionRequest(definition), replaceEdgeDefinitionResponseDeserializer()); - } - - /** - * Remove one edge definition from the graph. This will only remove the edge collection, the vertex collections - * remain untouched and can still be used in your queries - * - * @see API - * Documentation - * @param definitionName - * The name of the edge collection used in the definition - * @return information about the graph - * @throws ArangoDBException - */ - public GraphEntity removeEdgeDefinition(final String definitionName) throws ArangoDBException { - return executor.execute(removeEdgeDefinitionRequest(definitionName), - removeEdgeDefinitionResponseDeserializer()); - } - -} diff --git a/src/main/java/com/arangodb/ArangoVertexCollection.java b/src/main/java/com/arangodb/ArangoVertexCollection.java deleted file mode 100644 index d38ee91f4..000000000 --- a/src/main/java/com/arangodb/ArangoVertexCollection.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.entity.VertexEntity; -import com.arangodb.entity.VertexUpdateEntity; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.InternalArangoVertexCollection; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.VertexCreateOptions; -import com.arangodb.model.VertexDeleteOptions; -import com.arangodb.model.VertexReplaceOptions; -import com.arangodb.model.VertexUpdateOptions; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoVertexCollection extends - InternalArangoVertexCollection { - - private static final Logger LOGGER = LoggerFactory.getLogger(ArangoVertexCollection.class); - - protected ArangoVertexCollection(final ArangoGraph graph, final String name) { - super(graph, name); - } - - /** - * Removes a vertex collection from the graph and optionally deletes the collection, if it is not used in any other - * graph - * - * @see API - * Documentation - * @throws ArangoDBException - */ - public void drop() throws ArangoDBException { - executor.execute(dropRequest(), Void.class); - } - - /** - * Creates a new vertex in the collection - * - * @see API Documentation - * @param value - * A representation of a single vertex (POJO, VPackSlice or String for Json) - * @return information about the vertex - * @throws ArangoDBException - */ - public VertexEntity insertVertex(final T value) throws ArangoDBException { - return executor.execute(insertVertexRequest(value, new VertexCreateOptions()), - insertVertexResponseDeserializer(value)); - } - - /** - * Creates a new vertex in the collection - * - * @see API Documentation - * @param value - * A representation of a single vertex (POJO, VPackSlice or String for Json) - * @param options - * 
Additional options, can be null - * @return information about the vertex - * @throws ArangoDBException - */ - public VertexEntity insertVertex(final T value, final VertexCreateOptions options) throws ArangoDBException { - return executor.execute(insertVertexRequest(value, options), insertVertexResponseDeserializer(value)); - } - - /** - * Fetches an existing vertex - * - * @see API Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @return the vertex identified by the key - * @throws ArangoDBException - */ - public T getVertex(final String key, final Class type) throws ArangoDBException { - try { - return executor.execute(getVertexRequest(key, new DocumentReadOptions()), - getVertexResponseDeserializer(type)); - } catch (final ArangoDBException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(e.getMessage(), e); - } - return null; - } - } - - /** - * Fetches an existing vertex - * - * @see API Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return the vertex identified by the key - * @throws ArangoDBException - */ - public T getVertex(final String key, final Class type, final DocumentReadOptions options) - throws ArangoDBException { - try { - return executor.execute(getVertexRequest(key, options), getVertexResponseDeserializer(type)); - } catch (final ArangoDBException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(e.getMessage(), e); - } - return null; - } - } - - /** - * Replaces the vertex with key with the one in the body, provided there is such a vertex and no precondition is - * violated - * - * @see API - * Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @return information about 
the vertex - * @throws ArangoDBException - */ - public VertexUpdateEntity replaceVertex(final String key, final T value) throws ArangoDBException { - return executor.execute(replaceVertexRequest(key, value, new VertexReplaceOptions()), - replaceVertexResponseDeserializer(value)); - } - - /** - * Replaces the vertex with key with the one in the body, provided there is such a vertex and no precondition is - * violated - * - * @see API - * Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the vertex - * @throws ArangoDBException - */ - public VertexUpdateEntity replaceVertex(final String key, final T value, final VertexReplaceOptions options) - throws ArangoDBException { - return executor.execute(replaceVertexRequest(key, value, options), replaceVertexResponseDeserializer(value)); - } - - /** - * Partially updates the vertex identified by document-key. The value must contain a document with the attributes to - * patch (the patch document). All attributes from the patch document will be added to the existing document if they - * do not yet exist, and overwritten in the existing document if they do exist there. - * - * @see API Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @return information about the vertex - * @throws ArangoDBException - */ - public VertexUpdateEntity updateVertex(final String key, final T value) throws ArangoDBException { - return executor.execute(updateVertexRequest(key, value, new VertexUpdateOptions()), - updateVertexResponseDeserializer(value)); - } - - /** - * Partially updates the vertex identified by document-key. The value must contain a document with the attributes to - * patch (the patch document). 
All attributes from the patch document will be added to the existing document if they - * do not yet exist, and overwritten in the existing document if they do exist there. - * - * @see API Documentation - * @param key - * The key of the vertex - * @param type - * The type of the vertex-document (POJO class, VPackSlice or String for Json) - * @param options - * Additional options, can be null - * @return information about the vertex - * @throws ArangoDBException - */ - public VertexUpdateEntity updateVertex(final String key, final T value, final VertexUpdateOptions options) - throws ArangoDBException { - return executor.execute(updateVertexRequest(key, value, options), updateVertexResponseDeserializer(value)); - } - - /** - * Removes a vertex - * - * @see API Documentation - * @param key - * The key of the vertex - * @throws ArangoDBException - */ - public void deleteVertex(final String key) throws ArangoDBException { - executor.execute(deleteVertexRequest(key, new VertexDeleteOptions()), Void.class); - } - - /** - * Removes a vertex - * - * @see API Documentation - * @param key - * The key of the vertex - * @param options - * Additional options, can be null - * @throws ArangoDBException - */ - public void deleteVertex(final String key, final VertexDeleteOptions options) throws ArangoDBException { - executor.execute(deleteVertexRequest(key, options), Void.class); - } - -} diff --git a/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java b/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java deleted file mode 100644 index aaf5ae82f..000000000 --- a/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class AqlExecutionExplainEntity { - - public static class ExecutionPlan { - private Collection nodes; - private Collection rules; - private Collection collections; - private Collection variables; - private Integer estimatedCost; - private Integer estimatedNrItems; - - public Collection getNodes() { - return nodes; - } - - public Collection getRules() { - return rules; - } - - public Collection getCollections() { - return collections; - } - - public Collection getVariables() { - return variables; - } - - public Integer getEstimatedCost() { - return estimatedCost; - } - - public Integer getEstimatedNrItems() { - return estimatedNrItems; - } - } - - public static class ExecutionNode { - private String type; - private Collection dependencies; - private Long id; - private Integer estimatedCost; - private Integer estimatedNrItems; - private Long depth; - private String database; - private String collection; - private ExecutionVariable inVariable; - private ExecutionVariable outVariable; - private ExecutionVariable conditionVariable; - private Boolean random; - private Long offset; - private Long limit; - private Boolean fullCount; - private ExecutionNode subquery; - private Boolean isConst; - private Boolean canThrow; - private String expressionType; - private IndexEntity indexes; - private ExecutionExpression expression; - private ExecutionCollection 
condition; - private Boolean reverse; - - public String getType() { - return type; - } - - public Collection getDependencies() { - return dependencies; - } - - public Long getId() { - return id; - } - - public Integer getEstimatedCost() { - return estimatedCost; - } - - public Integer getEstimatedNrItems() { - return estimatedNrItems; - } - - public Long getDepth() { - return depth; - } - - public String getDatabase() { - return database; - } - - public String getCollection() { - return collection; - } - - public ExecutionVariable getInVariable() { - return inVariable; - } - - public ExecutionVariable getOutVariable() { - return outVariable; - } - - public ExecutionVariable getConditionVariable() { - return conditionVariable; - } - - public Boolean getRandom() { - return random; - } - - public Long getOffset() { - return offset; - } - - public Long getLimit() { - return limit; - } - - public Boolean getFullCount() { - return fullCount; - } - - public ExecutionNode getSubquery() { - return subquery; - } - - public Boolean getIsConst() { - return isConst; - } - - public Boolean getCanThrow() { - return canThrow; - } - - public String getExpressionType() { - return expressionType; - } - - public IndexEntity getIndexes() { - return indexes; - } - - public ExecutionExpression getExpression() { - return expression; - } - - public ExecutionCollection getCondition() { - return condition; - } - - public Boolean getReverse() { - return reverse; - } - } - - public static class ExecutionVariable { - private Long id; - private String name; - - public Long getId() { - return id; - } - - public String getName() { - return name; - } - } - - public static class ExecutionExpression { - private String type; - private String name; - private Long id; - private Object value; - private Boolean sorted; - private String quantifier; - private Collection levels; - private Collection subNodes; - - public String getType() { - return type; - } - - public String getName() { - return name; - } - 
- public Long getId() { - return id; - } - - public Object getValue() { - return value; - } - - public Boolean getSorted() { - return sorted; - } - - public String getQuantifier() { - return quantifier; - } - - public Collection getLevels() { - return levels; - } - - public Collection getSubNodes() { - return subNodes; - } - } - - public static class ExecutionCollection { - private String name; - private String type; - - public String getName() { - return name; - } - - public String getType() { - return type; - } - } - - public static class ExecutionStats { - private Integer rulesExecuted; - private Integer rulesSkipped; - private Integer plansCreated; - - public Integer getRulesExecuted() { - return rulesExecuted; - } - - public Integer getRulesSkipped() { - return rulesSkipped; - } - - public Integer getPlansCreated() { - return plansCreated; - } - - } - - private ExecutionPlan plan; - private Collection plans; - private Collection warnings; - private ExecutionStats stats; - private Boolean cacheable; - - public ExecutionPlan getPlan() { - return plan; - } - - public Collection getPlans() { - return plans; - } - - public Collection getWarnings() { - return warnings; - } - - public ExecutionStats getStats() { - return stats; - } - - public Boolean getCacheable() { - return cacheable; - } - -} diff --git a/src/main/java/com/arangodb/entity/AqlParseEntity.java b/src/main/java/com/arangodb/entity/AqlParseEntity.java deleted file mode 100644 index 5484a0929..000000000 --- a/src/main/java/com/arangodb/entity/AqlParseEntity.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class AqlParseEntity { - - public static class AstNode { - private String type; - private Collection subNodes; - private String name; - private Long id; - private Object value; - - public String getType() { - return type; - } - - public Collection getSubNodes() { - return subNodes; - } - - public String getName() { - return name; - } - - public Long getId() { - return id; - } - - public Object getValue() { - return value; - } - - } - - private Collection collections; - private Collection bindVars; - private Collection ast; - - public Collection getCollections() { - return collections; - } - - public Collection getBindVars() { - return bindVars; - } - - public Collection getAst() { - return ast; - } - -} diff --git a/src/main/java/com/arangodb/entity/ArangoDBVersion.java b/src/main/java/com/arangodb/entity/ArangoDBVersion.java deleted file mode 100644 index 9d7fa20d4..000000000 --- a/src/main/java/com/arangodb/entity/ArangoDBVersion.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class ArangoDBVersion { - - public enum License { - ENTERPRISE, COMMUNITY - } - - private String server; - private String version; - private License license; - - public ArangoDBVersion() { - super(); - } - - /** - * @return will always contain arango - */ - public String getServer() { - return server; - } - - /** - * @return the server version string. The string has the format "major.minor.sub". major and minor will be numeric, - * and sub may contain a number or a textual version. - */ - public String getVersion() { - return version; - } - - /** - * @return the license - */ - public License getLicense() { - return license; - } - -} diff --git a/src/main/java/com/arangodb/entity/BaseDocument.java b/src/main/java/com/arangodb/entity/BaseDocument.java deleted file mode 100644 index b3b4c0088..000000000 --- a/src/main/java/com/arangodb/entity/BaseDocument.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.io.Serializable; -import java.util.HashMap; -import java.util.Map; - -import com.arangodb.entity.DocumentField.Type; - -/** - * @author Mark Vollmary - * - */ -public class BaseDocument implements Serializable { - - private static final long serialVersionUID = -1824742667228719116L; - - @DocumentField(Type.ID) - protected String id; - @DocumentField(Type.KEY) - protected String key; - @DocumentField(Type.REV) - protected String revision; - protected Map properties; - - public BaseDocument() { - super(); - properties = new HashMap(); - } - - public BaseDocument(final String key) { - this(); - this.key = key; - } - - public BaseDocument(final Map properties) { - this(); - final Object tmpId = properties.remove(DocumentField.Type.ID.getSerializeName()); - if (tmpId != null) { - id = tmpId.toString(); - } - final Object tmpKey = properties.remove(DocumentField.Type.KEY.getSerializeName()); - if (tmpKey != null) { - key = tmpKey.toString(); - } - final Object tmpRev = properties.remove(DocumentField.Type.REV.getSerializeName()); - if (tmpRev != null) { - revision = tmpRev.toString(); - } - this.properties = properties; - } - - public String getId() { - return id; - } - - public void setId(final String id) { - this.id = id; - } - - public String getKey() { - return key; - } - - public void setKey(final String key) { - this.key = key; - } - - public String getRevision() { - return revision; - } - - public void setRevision(final String revision) 
{ - this.revision = revision; - } - - public Map getProperties() { - return properties; - } - - public void setProperties(final Map properties) { - this.properties = properties; - } - - public void addAttribute(final String key, final Object value) { - properties.put(key, value); - } - - public void updateAttribute(final String key, final Object value) { - if (properties.containsKey(key)) { - properties.put(key, value); - } - } - - public Object getAttribute(final String key) { - return properties.get(key); - } - - @Override - public String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append("BaseDocument [documentRevision="); - sb.append(revision); - sb.append(", documentHandle="); - sb.append(id); - sb.append(", documentKey="); - sb.append(key); - sb.append(", properties="); - sb.append(properties); - sb.append("]"); - return sb.toString(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((id == null) ? 0 : id.hashCode()); - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((properties == null) ? 0 : properties.hashCode()); - result = prime * result + ((revision == null) ? 
0 : revision.hashCode()); - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final BaseDocument other = (BaseDocument) obj; - if (id == null) { - if (other.id != null) { - return false; - } - } else if (!id.equals(other.id)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (properties == null) { - if (other.properties != null) { - return false; - } - } else if (!properties.equals(other.properties)) { - return false; - } - if (revision == null) { - if (other.revision != null) { - return false; - } - } else if (!revision.equals(other.revision)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/com/arangodb/entity/BaseEdgeDocument.java b/src/main/java/com/arangodb/entity/BaseEdgeDocument.java deleted file mode 100644 index 362a98442..000000000 --- a/src/main/java/com/arangodb/entity/BaseEdgeDocument.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Map; - -import com.arangodb.entity.DocumentField.Type; - -/** - * @author Mark Vollmary - * - */ -public class BaseEdgeDocument extends BaseDocument { - - private static final long serialVersionUID = 6904923804449368783L; - - @DocumentField(Type.FROM) - private String from; - @DocumentField(Type.TO) - private String to; - - public BaseEdgeDocument() { - super(); - } - - public BaseEdgeDocument(final String from, final String to) { - super(); - this.from = from; - this.to = to; - } - - public BaseEdgeDocument(final String key, final String from, final String to) { - super(key); - this.from = from; - this.to = to; - } - - public BaseEdgeDocument(final Map properties) { - super(properties); - final Object tmpFrom = properties.remove(DocumentField.Type.FROM.getSerializeName()); - if (tmpFrom != null) { - from = tmpFrom.toString(); - } - final Object tmpTo = properties.remove(DocumentField.Type.TO.getSerializeName()); - if (tmpTo != null) { - to = tmpTo.toString(); - } - } - - public String getFrom() { - return from; - } - - public void setFrom(final String from) { - this.from = from; - } - - public String getTo() { - return to; - } - - public void setTo(final String to) { - this.to = to; - } - - @Override - public String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append("BaseDocument [documentRevision="); - sb.append(revision); - sb.append(", documentHandle="); - sb.append(id); - sb.append(", documentKey="); - sb.append(key); - sb.append(", from="); - sb.append(from); - sb.append(", to="); - sb.append(to); - sb.append(", properties="); - sb.append(properties); - sb.append("]"); - return sb.toString(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((from == null) ? 0 : from.hashCode()); - result = prime * result + ((to == null) ? 
0 : to.hashCode()); - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final BaseEdgeDocument other = (BaseEdgeDocument) obj; - if (from == null) { - if (other.from != null) { - return false; - } - } else if (!from.equals(other.from)) { - return false; - } - if (to == null) { - if (other.to != null) { - return false; - } - } else if (!to.equals(other.to)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/com/arangodb/entity/CollectionEntity.java b/src/main/java/com/arangodb/entity/CollectionEntity.java deleted file mode 100644 index d889e6f1b..000000000 --- a/src/main/java/com/arangodb/entity/CollectionEntity.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class CollectionEntity { - - private String id; - private String name; - private Boolean waitForSync; - private Boolean isVolatile; - private Boolean isSystem; - private CollectionStatus status; - private CollectionType type; - - public CollectionEntity() { - super(); - } - - public String getId() { - return id; - } - - public String getName() { - return name; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - public Boolean getIsVolatile() { - return isVolatile; - } - - public Boolean getIsSystem() { - return isSystem; - } - - public CollectionStatus getStatus() { - return status; - } - - public CollectionType getType() { - return type; - } - -} diff --git a/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java b/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java deleted file mode 100644 index 7bce871a7..000000000 --- a/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CollectionPropertiesEntity extends CollectionEntity { - - private Boolean doCompact; - private Long journalSize; - private Integer indexBuckets; - private KeyOptions keyOptions; - private Long count; - private Integer numberOfShards; - private Collection shardKeys; - private Integer replicationFactor; - - public CollectionPropertiesEntity() { - super(); - } - - public Boolean getDoCompact() { - return doCompact; - } - - public void setDoCompact(final Boolean doCompact) { - this.doCompact = doCompact; - } - - public Long getJournalSize() { - return journalSize; - } - - public void setJournalSize(final Long journalSize) { - this.journalSize = journalSize; - } - - public Integer getIndexBuckets() { - return indexBuckets; - } - - public void setIndexBuckets(final Integer indexBuckets) { - this.indexBuckets = indexBuckets; - } - - public KeyOptions getKeyOptions() { - return keyOptions; - } - - public void setKeyOptions(final KeyOptions keyOptions) { - this.keyOptions = keyOptions; - } - - public Long getCount() { - return count; - } - - public void setCount(final Long count) { - this.count = count; - } - - /** - * @return contains the names of document attributes that are used to determine the target shard for documents. Only - * in a cluster setup - */ - public Integer getNumberOfShards() { - return numberOfShards; - } - - public void setNumberOfShards(final Integer numberOfShards) { - this.numberOfShards = numberOfShards; - } - - /** - * @return the number of shards of the collection. Only in a cluster setup. 
- */ - public Collection getShardKeys() { - return shardKeys; - } - - public void setShardKeys(final Collection shardKeys) { - this.shardKeys = shardKeys; - } - - public Integer getReplicationFactor() { - return replicationFactor; - } - - public void setReplicationFactor(final Integer replicationFactor) { - this.replicationFactor = replicationFactor; - } - -} diff --git a/src/main/java/com/arangodb/entity/CursorEntity.java b/src/main/java/com/arangodb/entity/CursorEntity.java deleted file mode 100644 index 2bb3d6164..000000000 --- a/src/main/java/com/arangodb/entity/CursorEntity.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CursorEntity { - - private String id; - private Integer count; - private Extras extra; - private Boolean cached; - private Boolean hasMore; - private VPackSlice result; - - public String getId() { - return id; - } - - /** - * @return the total number of result documents available (only available if the query was executed with the count - * attribute set) - */ - public Integer getCount() { - return count; - } - - /** - * @return an optional object with extra information about the query result contained in its stats sub-attribute. - * For data-modification queries, the extra.stats sub-attribute will contain the number of modified - * documents and the number of documents that could not be modified due to an error (if ignoreErrors query - * option is specified) - */ - public Extras getExtra() { - return extra; - } - - /** - * @return a boolean flag indicating whether the query result was served from the query cache or not. If the query - * result is served from the query cache, the extra return attribute will not contain any stats - * sub-attribute and no profile sub-attribute. 
- */ - public Boolean getCached() { - return cached; - } - - /** - * @return A boolean indicator whether there are more results available for the cursor on the server - */ - public Boolean getHasMore() { - return hasMore; - } - - /** - * @return an vpack-array of result documents (might be empty if query has no results) - */ - public VPackSlice getResult() { - return result; - } - - public static class Warning { - - private Integer code; - private String message; - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - } - - public static class Extras { - private Stats stats; - private Collection warnings; - - public Stats getStats() { - return stats; - } - - public Collection getWarnings() { - return warnings; - } - - } - - public static class Stats { - private Long writesExecuted; - private Long writesIgnored; - private Long scannedFull; - private Long scannedIndex; - private Long filtered; - private Long fullCount; - private Double executionTime; - - public Long getWritesExecuted() { - return writesExecuted; - } - - public Long getWritesIgnored() { - return writesIgnored; - } - - public Long getScannedFull() { - return scannedFull; - } - - public Long getScannedIndex() { - return scannedIndex; - } - - public Long getFiltered() { - return filtered; - } - - public Long getFullCount() { - return fullCount; - } - - public Double getExecutionTime() { - return executionTime; - } - - } -} diff --git a/src/main/java/com/arangodb/entity/DatabaseEntity.java b/src/main/java/com/arangodb/entity/DatabaseEntity.java deleted file mode 100644 index 96c533ddd..000000000 --- a/src/main/java/com/arangodb/entity/DatabaseEntity.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DatabaseEntity { - - private String id; - private String name; - private String path; - private Boolean isSystem; - - /** - * @return the id of the database - */ - public String getId() { - return id; - } - - /** - * @return the name of the database - */ - public String getName() { - return name; - } - - /** - * @return the filesystem path of the database - */ - public String getPath() { - return path; - } - - /** - * @return whether or not the database is the _system database - */ - public Boolean getIsSystem() { - return isSystem; - } - -} diff --git a/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java b/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java deleted file mode 100644 index d23f65f0d..000000000 --- a/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.velocypack.annotations.Expose; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentDeleteEntity extends DocumentEntity { - - @Expose(deserialize = false) - private T oldDocument; - - public DocumentDeleteEntity() { - super(); - } - - /** - * @return If the query parameter returnOld is true, then the complete previous revision of the document is - * returned. - */ - public T getOld() { - return oldDocument; - } - - public void setOld(final T oldDocument) { - this.oldDocument = oldDocument; - } -} diff --git a/src/main/java/com/arangodb/entity/DocumentImportEntity.java b/src/main/java/com/arangodb/entity/DocumentImportEntity.java deleted file mode 100644 index 8d4a3572f..000000000 --- a/src/main/java/com/arangodb/entity/DocumentImportEntity.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * @author Mark Vollmary - * - */ -public class DocumentImportEntity { - - private Integer created; - private Integer errors; - private Integer empty; - private Integer updated; - private Integer ignored; - private Collection details; - - public DocumentImportEntity() { - super(); - details = new ArrayList(); - } - - /** - * @return number of documents imported. - */ - public Integer getCreated() { - return created; - } - - public void setCreated(final Integer created) { - this.created = created; - } - - /** - * @return number of documents that were not imported due to an error. - */ - public Integer getErrors() { - return errors; - } - - public void setErrors(final Integer errors) { - this.errors = errors; - } - - /** - * @return number of empty lines found in the input (will only contain a value greater zero for types documents or - * auto). - */ - public Integer getEmpty() { - return empty; - } - - public void setEmpty(final Integer empty) { - this.empty = empty; - } - - /** - * @return number of updated/replaced documents (in case onDuplicate was set to either update or replace). - */ - public Integer getUpdated() { - return updated; - } - - public void setUpdated(final Integer updated) { - this.updated = updated; - } - - /** - * @return number of failed but ignored insert operations (in case onDuplicate was set to ignore). - */ - public Integer getIgnored() { - return ignored; - } - - public void setIgnored(final Integer ignored) { - this.ignored = ignored; - } - - /** - * @return if query parameter details is set to true, the result contain details with more detailed information - * about which documents could not be inserted. 
- */ - public Collection getDetails() { - return details; - } - - public void setDetails(final Collection details) { - this.details = details; - } - -} diff --git a/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java b/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java deleted file mode 100644 index a2dd68d7b..000000000 --- a/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import com.arangodb.velocypack.annotations.Expose; -import com.arangodb.velocypack.annotations.SerializedName; - -/** - * @author Mark Vollmary - * @param - * - * @see API - * Documentation - * - */ -public class DocumentUpdateEntity extends DocumentEntity { - - @SerializedName("_oldRev") - private String oldRev; - @Expose(deserialize = false) - private T newDocument; - @Expose(deserialize = false) - private T oldDocument; - - public DocumentUpdateEntity() { - super(); - } - - public String getOldRev() { - return oldRev; - } - - /** - * @return If the query parameter returnNew is true, then the complete new document is returned. 
- */ - public T getNew() { - return newDocument; - } - - public void setNew(final T newDocument) { - this.newDocument = newDocument; - } - - /** - * @return If the query parameter returnOld is true, then the complete previous revision of the document is - * returned. - */ - public T getOld() { - return oldDocument; - } - - public void setOld(final T oldDocument) { - this.oldDocument = oldDocument; - } - -} diff --git a/src/main/java/com/arangodb/entity/EdgeDefinition.java b/src/main/java/com/arangodb/entity/EdgeDefinition.java deleted file mode 100644 index cc47f89b6..000000000 --- a/src/main/java/com/arangodb/entity/EdgeDefinition.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Arrays; -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeDefinition { - - private String collection; - private Collection from; - private Collection to; - - public String getCollection() { - return collection; - } - - public EdgeDefinition collection(final String collection) { - this.collection = collection; - return this; - } - - public Collection getFrom() { - return from; - } - - public EdgeDefinition from(final String... 
from) { - this.from = Arrays.asList(from); - return this; - } - - public Collection getTo() { - return to; - } - - public EdgeDefinition to(final String... to) { - this.to = Arrays.asList(to); - return this; - } - -} diff --git a/src/main/java/com/arangodb/entity/ErrorEntity.java b/src/main/java/com/arangodb/entity/ErrorEntity.java deleted file mode 100644 index bc3f41c29..000000000 --- a/src/main/java/com/arangodb/entity/ErrorEntity.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.io.Serializable; - -/** - * @author Mark Vollmary - * - */ -public class ErrorEntity implements Serializable { - - private static final long serialVersionUID = -5918898261563691261L; - - private String errorMessage; - private String exception; - private int code; - private int errorNum; - - public ErrorEntity() { - super(); - } - - /** - * @return a descriptive error message - */ - public String getErrorMessage() { - return errorMessage; - } - - /** - * @return the exception message, passed when transaction fails - * @return - */ - public String getException() { - return exception; - } - - /** - * @return the status code - */ - public int getCode() { - return code; - } - - /** - * @return the server error number - */ - public int getErrorNum() { - return errorNum; - } - -} diff --git a/src/main/java/com/arangodb/entity/GraphEntity.java b/src/main/java/com/arangodb/entity/GraphEntity.java deleted file mode 100644 index 80c12e19f..000000000 --- a/src/main/java/com/arangodb/entity/GraphEntity.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class GraphEntity { - - private String name; - /** - * Special case where _key is used instead of name. - */ - private String _key; - private Collection edgeDefinitions; - private Collection orphanCollections; - private Boolean isSmart; - private Integer numberOfShards; - private String smartGraphAttribute; - - public String getName() { - return name != null ? name : _key; - } - - public Collection getEdgeDefinitions() { - return edgeDefinitions; - } - - public Collection getOrphanCollections() { - return orphanCollections; - } - - public Boolean getIsSmart() { - return isSmart; - } - - public Integer getNumberOfShards() { - return numberOfShards; - } - - public String getSmartGraphAttribute() { - return smartGraphAttribute; - } - -} diff --git a/src/main/java/com/arangodb/entity/IndexEntity.java b/src/main/java/com/arangodb/entity/IndexEntity.java deleted file mode 100644 index 66efde3e3..000000000 --- a/src/main/java/com/arangodb/entity/IndexEntity.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - */ -public class IndexEntity { - - private String id; - private IndexType type; - private Collection fields; - private Integer selectivityEstimate; - private Boolean unique; - private Boolean sparse; - private Integer minLength; - private Boolean isNewlyCreated; - private Boolean geoJson; - private Boolean constraint; - private Boolean deduplicate; - - public IndexEntity() { - super(); - } - - public String getId() { - return id; - } - - public IndexType getType() { - return type; - } - - public Collection getFields() { - return fields; - } - - public Integer getSelectivityEstimate() { - return selectivityEstimate; - } - - public Boolean getUnique() { - return unique; - } - - public Boolean getSparse() { - return sparse; - } - - public Integer getMinLength() { - return minLength; - } - - public Boolean getIsNewlyCreated() { - return isNewlyCreated; - } - - public Boolean getGeoJson() { - return geoJson; - } - - public Boolean getConstraint() { - return constraint; - } - - public Boolean getDeduplicate() { - return deduplicate; - } - -} diff --git a/src/main/java/com/arangodb/entity/KeyOptions.java b/src/main/java/com/arangodb/entity/KeyOptions.java deleted file mode 100644 index 41ec5fdfe..000000000 --- a/src/main/java/com/arangodb/entity/KeyOptions.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public class KeyOptions { - - private Boolean allowUserKeys; - private KeyType type; - private Integer increment; - private Integer offset; - - public KeyOptions() { - super(); - } - - public KeyOptions(final Boolean allowUserKeys, final KeyType type, final Integer increment, final Integer offset) { - super(); - this.allowUserKeys = allowUserKeys; - this.type = type; - this.increment = increment; - this.offset = offset; - } - - public Boolean getAllowUserKeys() { - return allowUserKeys; - } - - public void setAllowUserKeys(final Boolean allowUserKeys) { - this.allowUserKeys = allowUserKeys; - } - - public KeyType getType() { - return type; - } - - public void setType(final KeyType type) { - this.type = type; - } - - public Integer getIncrement() { - return increment; - } - - public void setIncrement(final Integer increment) { - this.increment = increment; - } - - public Integer getOffset() { - return offset; - } - - public void setOffset(final Integer offset) { - this.offset = offset; - } - -} diff --git a/src/main/java/com/arangodb/entity/LogEntity.java b/src/main/java/com/arangodb/entity/LogEntity.java deleted file mode 100644 index 01758c1ea..000000000 --- a/src/main/java/com/arangodb/entity/LogEntity.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this 
file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.List; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class LogEntity { - - private List lid; - private List level; - private List timestamp; - private List text; - private Long totalAmount; - - /** - * @return a list of log entry identifiers. Each log message is uniquely identified by its @LIT{lid} and the - * identifiers are in ascending order - */ - public List getLid() { - return lid; - } - - /** - * @return a list of the log-levels for all log entries - */ - public List getLevel() { - return level; - } - - /** - * @return a list of the timestamps as seconds since 1970-01-01 for all log entries - */ - public List getTimestamp() { - return timestamp; - } - - /** - * @return a list of the texts of all log entries - */ - public List getText() { - return text; - } - - /** - * @return the total amount of log entries before pagination - */ - public Long getTotalAmount() { - return totalAmount; - } - -} diff --git a/src/main/java/com/arangodb/entity/LogLevelEntity.java b/src/main/java/com/arangodb/entity/LogLevelEntity.java deleted file mode 100644 index f35f32f23..000000000 --- a/src/main/java/com/arangodb/entity/LogLevelEntity.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance 
with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public class LogLevelEntity { - - public enum LogLevel { - FATAL, ERROR, WARNING, INFO, DEBUG, TRACE, DEFAULT; - } - - private LogLevel agency; - private LogLevel agencycomm; - private LogLevel cluster; - private LogLevel collector; - private LogLevel communication; - private LogLevel compactor; - private LogLevel config; - private LogLevel datafiles; - private LogLevel graphs; - private LogLevel heartbeat; - private LogLevel mmap; - private LogLevel performance; - private LogLevel queries; - private LogLevel replication; - private LogLevel requests; - private LogLevel startup; - private LogLevel threads; - private LogLevel v8; - - public LogLevelEntity() { - super(); - } - - public LogLevel getAgency() { - return agency; - } - - public void setAgency(final LogLevel agency) { - this.agency = agency; - } - - public LogLevel getAgencycomm() { - return agencycomm; - } - - public void setAgencycomm(final LogLevel agencycomm) { - this.agencycomm = agencycomm; - } - - public LogLevel getCluster() { - return cluster; - } - - public void setCluster(final LogLevel cluster) { - this.cluster = cluster; - } - - public LogLevel getCollector() { - return collector; - } - - public void setCollector(final LogLevel collector) { - this.collector = collector; - } - - public LogLevel getCommunication() { - return communication; - } - - public void setCommunication(final LogLevel communication) { - 
this.communication = communication; - } - - public LogLevel getCompactor() { - return compactor; - } - - public void setCompactor(final LogLevel compactor) { - this.compactor = compactor; - } - - public LogLevel getConfig() { - return config; - } - - public void setConfig(final LogLevel config) { - this.config = config; - } - - public LogLevel getDatafiles() { - return datafiles; - } - - public void setDatafiles(final LogLevel datafiles) { - this.datafiles = datafiles; - } - - public LogLevel getGraphs() { - return graphs; - } - - public void setGraphs(final LogLevel graphs) { - this.graphs = graphs; - } - - public LogLevel getHeartbeat() { - return heartbeat; - } - - public void setHeartbeat(final LogLevel heartbeat) { - this.heartbeat = heartbeat; - } - - public LogLevel getMmap() { - return mmap; - } - - public void setMmap(final LogLevel mmap) { - this.mmap = mmap; - } - - public LogLevel getPerformance() { - return performance; - } - - public void setPerformance(final LogLevel performance) { - this.performance = performance; - } - - public LogLevel getQueries() { - return queries; - } - - public void setQueries(final LogLevel queries) { - this.queries = queries; - } - - public LogLevel getReplication() { - return replication; - } - - public void setReplication(final LogLevel replication) { - this.replication = replication; - } - - public LogLevel getRequests() { - return requests; - } - - public void setRequests(final LogLevel requests) { - this.requests = requests; - } - - public LogLevel getStartup() { - return startup; - } - - public void setStartup(final LogLevel startup) { - this.startup = startup; - } - - public LogLevel getThreads() { - return threads; - } - - public void setThreads(final LogLevel threads) { - this.threads = threads; - } - - public LogLevel getV8() { - return v8; - } - - public void setV8(final LogLevel v8) { - this.v8 = v8; - } - -} diff --git a/src/main/java/com/arangodb/entity/MultiDocumentEntity.java 
b/src/main/java/com/arangodb/entity/MultiDocumentEntity.java deleted file mode 100644 index 1645b3408..000000000 --- a/src/main/java/com/arangodb/entity/MultiDocumentEntity.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Collection; - -/** - * @author Mark Vollmary - * - */ -public class MultiDocumentEntity { - - private Collection documents; - private Collection errors; - private Collection documentsAndErrors; - - public MultiDocumentEntity() { - super(); - } - - /** - * @return all successfully processed documents - */ - public Collection getDocuments() { - return documents; - } - - public void setDocuments(final Collection documents) { - this.documents = documents; - } - - /** - * @return all errors - */ - public Collection getErrors() { - return errors; - } - - public void setErrors(final Collection errors) { - this.errors = errors; - } - - /** - * @return all successfully processed documents and all errors in the same order they are processed - */ - public Collection getDocumentsAndErrors() { - return documentsAndErrors; - } - - public void setDocumentsAndErrors(final Collection documentsAndErrors) { - this.documentsAndErrors = documentsAndErrors; - } - -} diff --git 
a/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java b/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java deleted file mode 100644 index c29cca2c7..000000000 --- a/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class QueryCachePropertiesEntity { - - public enum CacheMode { - off, on, demand; - } - - private CacheMode mode; - private Long maxResults; - - public QueryCachePropertiesEntity() { - super(); - } - - /** - * @return the mode the AQL query cache operates in. The mode is one of the following values: off, on or demand - */ - public CacheMode getMode() { - return mode; - } - - /** - * @param mode - * the mode the AQL query cache operates in. 
The mode is one of the following values: off, on or demand - */ - public void setMode(final CacheMode mode) { - this.mode = mode; - } - - /** - * @return the maximum number of query results that will be stored per database-specific cache - */ - public Long getMaxResults() { - return maxResults; - } - - /** - * @param maxResults - * the maximum number of query results that will be stored per database-specific cache - */ - public void setMaxResults(final Long maxResults) { - this.maxResults = maxResults; - } - -} diff --git a/src/main/java/com/arangodb/entity/QueryEntity.java b/src/main/java/com/arangodb/entity/QueryEntity.java deleted file mode 100644 index b2f638097..000000000 --- a/src/main/java/com/arangodb/entity/QueryEntity.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Date; -import java.util.Map; - -/** - * @author Mark Vollmary - * - */ -public class QueryEntity { - - public static final String PROPERTY_STARTED = "started"; - - private String id; - private String query; - private Date started; - private Double runTime; - private Map bindVars; - private QueryExecutionState state; - - public QueryEntity() { - super(); - } - - /** - * @return the query's id - */ - public String getId() { - return id; - } - - /** - * @return the query string (potentially truncated) - */ - public String getQuery() { - return query; - } - - /** - * @return the date and time when the query was started - */ - public Date getStarted() { - return started; - } - - /** - * - * @return the query's run time up to the point the list of queries was queried - */ - public Double getRunTime() { - return runTime; - } - - /** - * @return the bind parameter values used by the query - */ - public Map getBindVars() { - return bindVars; - } - - /** - * @return the query's current execution state - */ - public QueryExecutionState getState() { - return state; - } - -} diff --git a/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java b/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java deleted file mode 100644 index b7175896f..000000000 --- a/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -/** - * @author Mark Vollmary - * - */ -public class QueryTrackingPropertiesEntity { - - private Boolean enabled; - private Boolean trackSlowQueries; - private Long maxSlowQueries; - private Long slowQueryThreshold; - private Long maxQueryStringLength; - - public QueryTrackingPropertiesEntity() { - super(); - } - - /** - * @return If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be - * tracked - */ - public Boolean getEnabled() { - return enabled; - } - - /** - * @param enabled - * If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will - * be tracked - */ - public void setEnabled(final Boolean enabled) { - this.enabled = enabled; - } - - /** - * @return If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds - * the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must - * also be set to true. - */ - public Boolean getTrackSlowQueries() { - return trackSlowQueries; - } - - /** - * @param trackSlowQueries - * If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds - * the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property - * must also be set to true. 
- */ - public void setTrackSlowQueries(final Boolean trackSlowQueries) { - this.trackSlowQueries = trackSlowQueries; - } - - /** - * @return The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is - * full, the oldest entry in it will be discarded when additional slow queries occur. - */ - public Long getMaxSlowQueries() { - return maxSlowQueries; - } - - /** - * @param maxSlowQueries - * The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is - * full, the oldest entry in it will be discarded when additional slow queries occur. - */ - public void setMaxSlowQueries(final Long maxSlowQueries) { - this.maxSlowQueries = maxSlowQueries; - } - - /** - * @return The threshold value for treating a query as slow. A query with a runtime greater or equal to this - * threshold value will be put into the list of slow queries when slow query tracking is enabled. The value - * for slowQueryThreshold is specified in seconds. - */ - public Long getSlowQueryThreshold() { - return slowQueryThreshold; - } - - /** - * @param slowQueryThreshold - * The threshold value for treating a query as slow. A query with a runtime greater or equal to this - * threshold value will be put into the list of slow queries when slow query tracking is enabled. The - * value for slowQueryThreshold is specified in seconds. - */ - public void setSlowQueryThreshold(final Long slowQueryThreshold) { - this.slowQueryThreshold = slowQueryThreshold; - } - - /** - * @return The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, - * and this property can be used to save memory in case very long query strings are used. The value is - * specified in bytes. - */ - public Long getMaxQueryStringLength() { - return maxQueryStringLength; - } - - /** - * @param maxQueryStringLength - * The maximum query string length to keep in the list of queries. 
Query strings can have arbitrary - * lengths, and this property can be used to save memory in case very long query strings are used. The - * value is specified in bytes. - */ - public void setMaxQueryStringLength(final Long maxQueryStringLength) { - this.maxQueryStringLength = maxQueryStringLength; - } - -} diff --git a/src/main/java/com/arangodb/entity/UserEntity.java b/src/main/java/com/arangodb/entity/UserEntity.java deleted file mode 100644 index 568d86145..000000000 --- a/src/main/java/com/arangodb/entity/UserEntity.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import java.util.Map; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class UserEntity { - - private String user; - private Boolean active; - private Map extra; - private Boolean changePassword; - - /** - * @return The name of the user as a string - */ - public String getUser() { - return user; - } - - /** - * @return An flag that specifies whether the user is active - */ - public Boolean getActive() { - return active; - } - - /** - * @return An object with arbitrary extra data about the user - */ - public Map getExtra() { - return extra; - } - - public Boolean getChangePassword() { - return changePassword; - } - -} diff --git a/src/main/java/com/arangodb/internal/ArangoCursorIterator.java b/src/main/java/com/arangodb/internal/ArangoCursorIterator.java deleted file mode 100644 index d7f18bed9..000000000 --- a/src/main/java/com/arangodb/internal/ArangoCursorIterator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -import com.arangodb.ArangoCursor; -import com.arangodb.entity.CursorEntity; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * @param - * - */ -public class ArangoCursorIterator implements Iterator { - - private CursorEntity result; - private int pos; - - private final ArangoCursor cursor; - private final InternalArangoDatabase db; - private final ArangoCursorExecute execute; - private final HostHandle hostHandle; - - public ArangoCursorIterator(final ArangoCursor cursor, final ArangoCursorExecute execute, - final InternalArangoDatabase db, final CursorEntity result, final HostHandle hostHandle) { - super(); - this.cursor = cursor; - this.execute = execute; - this.db = db; - this.result = result; - this.hostHandle = hostHandle; - pos = 0; - } - - public CursorEntity getResult() { - return result; - } - - @Override - public boolean hasNext() { - return pos < result.getResult().size() || result.getHasMore(); - } - - @Override - public T next() { - if (pos >= result.getResult().size() && result.getHasMore()) { - result = execute.next(cursor.getId(), hostHandle); - pos = 0; - } - if (!hasNext()) { - throw new NoSuchElementException(); - } - return deserialize(result.getResult().get(pos++), cursor.getType()); - } - - protected R deserialize(final VPackSlice result, final Class type) { - return db.util().deserialize(result, type); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - -} diff --git a/src/main/java/com/arangodb/internal/ArangoDBConstants.java b/src/main/java/com/arangodb/internal/ArangoDBConstants.java deleted file mode 100644 index c9851e6f4..000000000 --- a/src/main/java/com/arangodb/internal/ArangoDBConstants.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * DISCLAIMER - * 
- * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import com.arangodb.Protocol; -import com.arangodb.entity.LoadBalancingStrategy; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDBConstants { - - public static final String DEFAULT_HOST = "127.0.0.1"; - public static final Integer DEFAULT_PORT = 8529; - public static final Integer DEFAULT_TIMEOUT = 0; - public static final String DEFAULT_USER = "root"; - public static final Boolean DEFAULT_USE_SSL = false; - - public static final int INTEGER_BYTES = Integer.SIZE / Byte.SIZE; - public static final int LONG_BYTES = Long.SIZE / Byte.SIZE; - public static final int CHUNK_MIN_HEADER_SIZE = INTEGER_BYTES + INTEGER_BYTES + LONG_BYTES; - public static final int CHUNK_MAX_HEADER_SIZE = CHUNK_MIN_HEADER_SIZE + LONG_BYTES; - public static final int CHUNK_DEFAULT_CONTENT_SIZE = 30000; - public static final int MAX_CONNECTIONS_VST_DEFAULT = 1; - public static final int MAX_CONNECTIONS_HTTP_DEFAULT = 20; - public static final Protocol DEFAULT_NETWORK_PROTOCOL = Protocol.VST; - public static final boolean DEFAULT_ACQUIRE_HOST_LIST = false; - public static final LoadBalancingStrategy DEFAULT_LOAD_BALANCING_STRATEGY = LoadBalancingStrategy.NONE; - - public static final String PATH_API_DOCUMENT = "/_api/document"; - public static final String PATH_API_COLLECTION 
= "/_api/collection"; - public static final String PATH_API_DATABASE = "/_api/database"; - public static final String PATH_API_VERSION = "/_api/version"; - public static final String PATH_API_INDEX = "/_api/index"; - public static final String PATH_API_USER = "/_api/user"; - public static final String PATH_API_CURSOR = "/_api/cursor"; - public static final String PATH_API_GHARIAL = "/_api/gharial"; - public static final String PATH_API_TRANSACTION = "/_api/transaction"; - public static final String PATH_API_AQLFUNCTION = "/_api/aqlfunction"; - public static final String PATH_API_EXPLAIN = "/_api/explain"; - public static final String PATH_API_QUERY = "/_api/query"; - public static final String PATH_API_QUERY_CACHE = "/_api/query-cache"; - public static final String PATH_API_QUERY_CACHE_PROPERTIES = "/_api/query-cache/properties"; - public static final String PATH_API_QUERY_PROPERTIES = "/_api/query/properties"; - public static final String PATH_API_QUERY_CURRENT = "/_api/query/current"; - public static final String PATH_API_QUERY_SLOW = "/_api/query/slow"; - public static final String PATH_API_TRAVERSAL = "/_api/traversal"; - public static final String PATH_API_ADMIN_LOG = "/_admin/log"; - public static final String PATH_API_ADMIN_LOG_LEVEL = "/_admin/log/level"; - public static final String PATH_API_ADMIN_ROUTING_RELOAD = "/_admin/routing/reload"; - public static final String PATH_API_IMPORT = "/_api/import"; - public static final String PATH_API_ROLE = "/_admin/server/role"; - public static final String PATH_ENDPOINTS = "/_api/cluster/endpoints"; - - public static final String ENCRYPTION_PLAIN = "plain"; - - public static final String SYSTEM = "_system"; - public static final String ID = "id"; - public static final String RESULT = "result"; - public static final String VISITED = "visited"; - public static final String VERTICES = "vertices"; - public static final String EDGES = "edges"; - public static final String WAIT_FOR_SYNC = "waitForSync"; - public static 
final String IF_NONE_MATCH = "If-None-Match"; - public static final String IF_MATCH = "If-Match"; - public static final String KEEP_NULL = "keepNull"; - public static final String MERGE_OBJECTS = "mergeObjects"; - public static final String IGNORE_REVS = "ignoreRevs"; - public static final String RETURN_NEW = "returnNew"; - public static final String NEW = "new"; - public static final String RETURN_OLD = "returnOld"; - public static final String OLD = "old"; - public static final String COLLECTION = "collection"; - public static final String COLLECTIONS = "collections"; - public static final String EXCLUDE_SYSTEM = "excludeSystem"; - public static final String USER = "user"; - public static final String DATABASE = "database"; - public static final String CURRENT = "current"; - public static final String INDEXES = "indexes"; - public static final String TRUNCATE = "truncate"; - public static final String COUNT = "count"; - public static final String LOAD = "load"; - public static final String UNLOAD = "unload"; - public static final String PROPERTIES = "properties"; - public static final String RENAME = "rename"; - public static final String REVISION = "revision"; - public static final String FULLCOUNT = "fullCount"; - public static final String GROUP = "group"; - public static final String NAMESPACE = "namespace"; - public static final String GRAPH = "graph"; - public static final String GRAPHS = "graphs"; - public static final String VERTEX = "vertex"; - public static final String EDGE = "edge"; - public static final String ERROR = "error"; - public static final String FROM_PREFIX = "fromPrefix"; - public static final String TO_PREFIX = "toPrefix"; - public static final String OVERWRITE = "overwrite"; - public static final String ON_DUPLICATE = "onDuplicate"; - public static final String COMPLETE = "complete"; - public static final String DETAILS = "details"; - public static final String TYPE = "type"; - public static final String IS_SYSTEM = "isSystem"; - public 
static final String ROLE = "role"; - public static final String ENDPOINTS = "endpoints"; - -} diff --git a/src/main/java/com/arangodb/internal/ArangoExecuteable.java b/src/main/java/com/arangodb/internal/ArangoExecuteable.java deleted file mode 100644 index 2402ad7e9..000000000 --- a/src/main/java/com/arangodb/internal/ArangoExecuteable.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.util.ArangoSerialization; - -/** - * @author Mark Vollmary - * - */ -public abstract class ArangoExecuteable { - - protected final E executor; - private final ArangoSerialization util; - - public ArangoExecuteable(final E executor, final ArangoSerialization util) { - super(); - this.executor = executor; - this.util = util; - } - - protected E executor() { - return executor; - } - - public ArangoSerialization util() { - return util; - } -} diff --git a/src/main/java/com/arangodb/internal/ArangoExecutor.java b/src/main/java/com/arangodb/internal/ArangoExecutor.java deleted file mode 100644 index 80c0be17b..000000000 --- a/src/main/java/com/arangodb/internal/ArangoExecutor.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.io.UnsupportedEncodingException; -import java.lang.reflect.Type; -import java.util.Map; -import java.util.regex.Pattern; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.util.EncodeUtils; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public abstract class ArangoExecutor { - - private static final String SLASH = "/"; - - public static interface ResponseDeserializer { - T deserialize(Response response) throws VPackException; - } - - protected static final String REGEX_KEY = "[^/]+"; - protected static final String REGEX_ID = "[^/]+/[^/]+"; - - private final DocumentCache documentCache; - private final ArangoSerialization util; - - protected ArangoExecutor(final ArangoSerialization util, final DocumentCache documentCache) { - super(); - this.documentCache = documentCache; - this.util = util; - } - - public DocumentCache documentCache() { - return documentCache; - } - - protected ArangoSerialization util() { - return util; - } - - protected String createPath(final String... 
params) { - final StringBuilder sb = new StringBuilder(); - for (int i = 0; i < params.length; i++) { - if (i > 0) { - sb.append(SLASH); - } - try { - final String param; - if (params[i].contains(SLASH)) { - param = createPath(params[i].split(SLASH)); - } else { - param = EncodeUtils.encodeURL(params[i]); - } - sb.append(param); - } catch (final UnsupportedEncodingException e) { - throw new ArangoDBException(e); - } - } - return sb.toString(); - } - - public void validateIndexId(final String id) { - validateName("index id", REGEX_ID, id); - } - - public void validateDocumentKey(final String key) throws ArangoDBException { - validateName("document key", REGEX_KEY, key); - } - - public void validateDocumentId(final String id) throws ArangoDBException { - validateName("document id", REGEX_ID, id); - } - - public String createDocumentHandle(final String collection, final String key) { - validateDocumentKey(key); - return new StringBuffer().append(collection).append(SLASH).append(key).toString(); - } - - protected void validateName(final String type, final String regex, final CharSequence name) - throws ArangoDBException { - if (!Pattern.matches(regex, name)) { - throw new ArangoDBException(String.format("%s %s is not valid.", type, name)); - } - } - - @SuppressWarnings("unchecked") - protected T createResult(final Type type, final Response response) { - return (T) ((type != Void.class && response.getBody() != null) ? 
deserialize(response.getBody(), type) : null); - } - - @Deprecated - protected T deserialize(final VPackSlice vpack, final Type type) throws ArangoDBException { - return util.deserialize(vpack, type); - } - - @Deprecated - protected VPackSlice serialize(final Object entity) throws ArangoDBException { - return util.serialize(entity); - } - - @Deprecated - protected VPackSlice serialize(final Object entity, final boolean serializeNullValues) throws ArangoDBException { - return util.serialize(entity, new ArangoSerializer.Options().serializeNullValues(serializeNullValues)); - } - - @Deprecated - protected VPackSlice serialize(final Object entity, final Type type) throws ArangoDBException { - return util.serialize(entity, new ArangoSerializer.Options().type(type)); - } - - @Deprecated - protected VPackSlice serialize(final Object entity, final Type type, final boolean serializeNullValues) - throws ArangoDBException { - return util.serialize(entity, - new ArangoSerializer.Options().type(type).serializeNullValues(serializeNullValues)); - } - - @Deprecated - protected VPackSlice serialize(final Object entity, final Type type, final Map additionalFields) - throws ArangoDBException { - return util.serialize(entity, new ArangoSerializer.Options().type(type).additionalFields(additionalFields)); - } - -} diff --git a/src/main/java/com/arangodb/internal/ArangoExecutorSync.java b/src/main/java/com/arangodb/internal/ArangoExecutorSync.java deleted file mode 100644 index 703bcae64..000000000 --- a/src/main/java/com/arangodb/internal/ArangoExecutorSync.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.io.IOException; -import java.lang.reflect.Type; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.net.CommunicationProtocol; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoExecutorSync extends ArangoExecutor { - - private final CommunicationProtocol protocol; - - public ArangoExecutorSync(final CommunicationProtocol protocol, final ArangoSerialization util, - final DocumentCache documentCache) { - super(util, documentCache); - this.protocol = protocol; - } - - public T execute(final Request request, final Type type) throws ArangoDBException { - return execute(request, type, null); - } - - public T execute(final Request request, final Type type, final HostHandle hostHandle) throws ArangoDBException { - return execute(request, new ResponseDeserializer() { - @Override - public T deserialize(final Response response) throws VPackException { - return createResult(type, response); - } - }, hostHandle); - } - - public T execute(final Request request, final ResponseDeserializer responseDeserializer) - throws ArangoDBException { - return execute(request, responseDeserializer, null); - } - - public T execute( - final Request request, - final ResponseDeserializer 
responseDeserializer, - final HostHandle hostHandle) throws ArangoDBException { - try { - final Response response = protocol.execute(request, hostHandle); - return responseDeserializer.deserialize(response); - } catch (final VPackException e) { - throw new ArangoDBException(e); - } - } - - public void disconnect() { - try { - protocol.close(); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } -} diff --git a/src/main/java/com/arangodb/internal/CollectionCache.java b/src/main/java/com/arangodb/internal/CollectionCache.java deleted file mode 100644 index 080a76167..000000000 --- a/src/main/java/com/arangodb/internal/CollectionCache.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.Date; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; -import com.arangodb.ArangoDatabase; -import com.arangodb.entity.CollectionEntity; - -/** - * @author Mark Vollmary - * - */ -public class CollectionCache { - - private static final Logger LOGGER = LoggerFactory.getLogger(CollectionCache.class); - private static final long MAX_CACHE_TIME = 600000; - - private static class CollectionInfo { - private final String name; - private final long time; - - public CollectionInfo(final String name, final long time) { - super(); - this.name = name; - this.time = time; - } - } - - public static interface DBAccess { - ArangoDatabase db(final String name); - } - - private final Map> cache; - private DBAccess access; - private String db; - - public CollectionCache() { - super(); - cache = new HashMap>(); - } - - public void init(final DBAccess access) { - this.access = access; - } - - public void setDb(final String db) { - this.db = db; - } - - public String getCollectionName(final long id) { - final CollectionInfo info = getInfo(id); - return info != null ? 
info.name : null; - } - - private CollectionInfo getInfo(final long id) { - Map dbCache = cache.get(db); - if (dbCache == null) { - dbCache = new HashMap(); - cache.put(db, dbCache); - } - CollectionInfo info = dbCache.get(id); - if (info == null || isExpired(info.time)) { - try { - final String name = execute(id); - info = new CollectionInfo(name, new Date().getTime()); - dbCache.put(id, info); - } catch (final ArangoDBException e) { - LOGGER.error(e.getMessage(), e); - } - } - return info; - } - - private String execute(final long id) throws ArangoDBException { - final CollectionEntity result = access.db(db).collection(String.valueOf(id)).getInfo(); - return result.getName(); - } - - private boolean isExpired(final long time) { - return new Date().getTime() > time + MAX_CACHE_TIME; - } - -} diff --git a/src/main/java/com/arangodb/internal/DocumentCache.java b/src/main/java/com/arangodb/internal/DocumentCache.java deleted file mode 100644 index d0b1c1263..000000000 --- a/src/main/java/com/arangodb/internal/DocumentCache.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import com.arangodb.ArangoDBException; -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.DocumentField.Type; - -/** - * @author Mark Vollmary - * - */ -public class DocumentCache { - - private final Map, Map> cache; - - public DocumentCache() { - super(); - cache = new HashMap, Map>(); - } - - public void setValues(final Object doc, final Map values) throws ArangoDBException { - try { - final Map fields = getFields(doc.getClass()); - for (final Entry value : values.entrySet()) { - final Field field = fields.get(value.getKey()); - if (field != null) { - field.set(doc, value.getValue()); - } - } - } catch (final IllegalArgumentException e) { - throw new ArangoDBException(e); - } catch (final IllegalAccessException e) { - throw new ArangoDBException(e); - } - } - - private Map getFields(final Class clazz) { - Map fields = new HashMap(); - if (!isTypeRestricted(clazz)) { - fields = cache.get(clazz); - if (fields == null) { - fields = createFields(clazz); - cache.put(clazz, fields); - } - } - return fields; - } - - private boolean isTypeRestricted(final Class type) { - return Map.class.isAssignableFrom(type) || Collection.class.isAssignableFrom(type); - } - - private Map createFields(final Class clazz) { - final Map fields = new HashMap(); - Class tmp = clazz; - final Collection values = new ArrayList( - Arrays.asList(DocumentField.Type.values())); - while (tmp != null && tmp != Object.class && values.size() > 0) { - final Field[] declaredFields = tmp.getDeclaredFields(); - for (int i = 0; i < declaredFields.length && values.size() > 0; i++) { - findAnnotation(values, fields, declaredFields[i]); - } - tmp = 
tmp.getSuperclass(); - } - return fields; - } - - private void findAnnotation( - final Collection values, - final Map fields, - final Field field) { - final DocumentField annotation = field.getAnnotation(DocumentField.class); - if (annotation != null && !field.isSynthetic() && !Modifier.isStatic(field.getModifiers()) - && String.class.isAssignableFrom(field.getType())) { - final Type value = annotation.value(); - if (values.contains(value)) { - field.setAccessible(true); - fields.put(value, field); - values.remove(value); - } - } - } -} diff --git a/src/main/java/com/arangodb/internal/Host.java b/src/main/java/com/arangodb/internal/Host.java deleted file mode 100644 index cb19010d9..000000000 --- a/src/main/java/com/arangodb/internal/Host.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -/** - * @author Mark Vollmary - * - */ -public class Host { - - private final String host; - private final int port; - - public Host(final String host, final int port) { - super(); - this.host = host; - this.port = port; - } - - public String getHost() { - return host; - } - - public int getPort() { - return port; - } - - @Override - public String toString() { - return String.format("host[addr=%s,port=%s]", host, port); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((host == null) ? 0 : host.hashCode()); - result = prime * result + port; - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final Host other = (Host) obj; - if (host == null) { - if (other.host != null) { - return false; - } - } else if (!host.equals(other.host)) { - return false; - } - if (port != other.port) { - return false; - } - return true; - } - -} diff --git a/src/main/java/com/arangodb/internal/InternalArangoCollection.java b/src/main/java/com/arangodb/internal/InternalArangoCollection.java deleted file mode 100644 index 21b5ca9f4..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoCollection.java +++ /dev/null @@ -1,720 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; - -import com.arangodb.ArangoDBException; -import com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.entity.DocumentDeleteEntity; -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.DocumentUpdateEntity; -import com.arangodb.entity.ErrorEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.MultiDocumentEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.CollectionPropertiesOptions; -import com.arangodb.model.CollectionRenameOptions; -import com.arangodb.model.DocumentCreateOptions; -import com.arangodb.model.DocumentDeleteOptions; -import com.arangodb.model.DocumentExistsOptions; -import com.arangodb.model.DocumentImportOptions; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.DocumentReplaceOptions; -import com.arangodb.model.DocumentUpdateOptions; -import com.arangodb.model.FulltextIndexOptions; -import com.arangodb.model.GeoIndexOptions; -import com.arangodb.model.HashIndexOptions; -import com.arangodb.model.ImportType; -import com.arangodb.model.OptionsBuilder; -import com.arangodb.model.PersistentIndexOptions; -import com.arangodb.model.SkiplistIndexOptions; -import 
com.arangodb.model.UserAccessOptions; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.Type; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class InternalArangoCollection, D extends InternalArangoDatabase, E extends ArangoExecutor, R, C extends VstConnection> - extends ArangoExecuteable { - - private final D db; - private final String name; - - public InternalArangoCollection(final D db, final String name) { - super(db.executor(), db.util()); - this.db = db; - this.name = name; - } - - public D db() { - return db; - } - - public String name() { - return name; - } - - protected Request insertDocumentRequest(final T value, final DocumentCreateOptions options) { - final Request request = new Request(db.name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)); - final DocumentCreateOptions params = (options != null ? 
options : new DocumentCreateOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer> insertDocumentResponseDeserializer(final T value) { - return new ResponseDeserializer>() { - @SuppressWarnings("unchecked") - @Override - public DocumentCreateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - final DocumentCreateEntity doc = util().deserialize(body, DocumentCreateEntity.class); - final VPackSlice newDoc = body.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, value.getClass())); - } - final Map values = new HashMap(); - values.put(DocumentField.Type.ID, doc.getId()); - values.put(DocumentField.Type.KEY, doc.getKey()); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request insertDocumentsRequest(final Collection values, final DocumentCreateOptions params) { - final Request request = new Request(db.name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.setBody( - util().serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); - return request; - } - - @SuppressWarnings("unchecked") - protected ResponseDeserializer>> insertDocumentsResponseDeserializer( - final Collection values, - final DocumentCreateOptions params) { - return new ResponseDeserializer>>() { - @Override - public MultiDocumentEntity> deserialize(final Response response) - throws VPackException { - Class type = null; - if 
(params.getReturnNew() != null && params.getReturnNew()) { - if (!values.isEmpty()) { - type = (Class) values.iterator().next().getClass(); - } - } - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity>(); - final Collection> docs = new ArrayList>(); - final Collection errors = new ArrayList(); - final Collection documentsAndErrors = new ArrayList(); - final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { - final VPackSlice next = iterator.next(); - if (next.get(ArangoDBConstants.ERROR).isTrue()) { - final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final DocumentCreateEntity doc = util().deserialize(next, DocumentCreateEntity.class); - final VPackSlice newDoc = next.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, type)); - } - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; - } - }; - } - - protected Request importDocumentsRequest(final String values, final DocumentImportOptions options) { - return importDocumentsRequest(options).putQueryParam(ArangoDBConstants.TYPE, ImportType.auto) - .setBody(util().serialize(values)); - } - - protected Request importDocumentsRequest(final Collection values, final DocumentImportOptions options) { - return importDocumentsRequest(options).putQueryParam(ArangoDBConstants.TYPE, ImportType.list).setBody( - util().serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); - } - - protected Request importDocumentsRequest(final DocumentImportOptions options) { - final DocumentImportOptions params = options != null ? 
options : new DocumentImportOptions(); - return new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_IMPORT) - .putQueryParam(ArangoDBConstants.COLLECTION, name) - .putQueryParam(ArangoDBConstants.FROM_PREFIX, params.getFromPrefix()) - .putQueryParam(ArangoDBConstants.TO_PREFIX, params.getToPrefix()) - .putQueryParam(ArangoDBConstants.OVERWRITE, params.getOverwrite()) - .putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()) - .putQueryParam(ArangoDBConstants.ON_DUPLICATE, params.getOnDuplicate()) - .putQueryParam(ArangoDBConstants.COMPLETE, params.getComplete()) - .putQueryParam(ArangoDBConstants.DETAILS, params.getDetails()); - } - - protected Request getDocumentRequest(final String key, final DocumentReadOptions options) { - final Request request = new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, executor.createDocumentHandle(name, key))); - final DocumentReadOptions params = (options != null ? options : new DocumentReadOptions()); - request.putHeaderParam(ArangoDBConstants.IF_NONE_MATCH, params.getIfNoneMatch()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - - protected Request getDocumentsRequest(final Collection keys, final DocumentReadOptions options) { - return new Request(db.name(), RequestType.PUT, executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)) - .putQueryParam("onlyget", true) - .putHeaderParam(ArangoDBConstants.IF_NONE_MATCH, options.getIfNoneMatch()) - .putHeaderParam(ArangoDBConstants.IF_MATCH, options.getIfMatch()).setBody(util().serialize(keys)); - } - - protected ResponseDeserializer> getDocumentsResponseDeserializer( - final Class type, - final DocumentReadOptions options) { - return new ResponseDeserializer>() { - @SuppressWarnings("unchecked") - @Override - public MultiDocumentEntity deserialize(final Response response) throws VPackException { - final MultiDocumentEntity multiDocument = new 
MultiDocumentEntity(); - final Collection docs = new ArrayList(); - final Collection errors = new ArrayList(); - final Collection documentsAndErrors = new ArrayList(); - final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { - final VPackSlice next = iterator.next(); - if (next.get(ArangoDBConstants.ERROR).isTrue()) { - final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final T doc = (T) util().deserialize(next, type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; - } - }; - } - - protected Request replaceDocumentRequest( - final String key, - final T value, - final DocumentReplaceOptions options) { - final Request request = new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, executor.createDocumentHandle(name, key))); - final DocumentReplaceOptions params = (options != null ? 
options : new DocumentReplaceOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.IGNORE_REVS, params.getIgnoreRevs()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer> replaceDocumentResponseDeserializer(final T value) { - return new ResponseDeserializer>() { - @SuppressWarnings("unchecked") - @Override - public DocumentUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - final DocumentUpdateEntity doc = util().deserialize(body, DocumentUpdateEntity.class); - final VPackSlice newDoc = body.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, value.getClass())); - } - final VPackSlice oldDoc = body.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, value.getClass())); - } - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request replaceDocumentsRequest(final Collection values, final DocumentReplaceOptions params) { - final Request request; - request = new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.IGNORE_REVS, params.getIgnoreRevs()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - 
request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody( - util().serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); - return request; - } - - @SuppressWarnings("unchecked") - protected ResponseDeserializer>> replaceDocumentsResponseDeserializer( - final Collection values, - final DocumentReplaceOptions params) { - return new ResponseDeserializer>>() { - @Override - public MultiDocumentEntity> deserialize(final Response response) - throws VPackException { - Class type = null; - if ((params.getReturnNew() != null && params.getReturnNew()) - || (params.getReturnOld() != null && params.getReturnOld())) { - if (!values.isEmpty()) { - type = (Class) values.iterator().next().getClass(); - } - } - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity>(); - final Collection> docs = new ArrayList>(); - final Collection errors = new ArrayList(); - final Collection documentsAndErrors = new ArrayList(); - final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { - final VPackSlice next = iterator.next(); - if (next.get(ArangoDBConstants.ERROR).isTrue()) { - final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final DocumentUpdateEntity doc = util().deserialize(next, DocumentUpdateEntity.class); - final VPackSlice newDoc = next.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, type)); - } - final VPackSlice oldDoc = next.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, type)); - } - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; - } - }; - } - - protected Request 
updateDocumentRequest(final String key, final T value, final DocumentUpdateOptions options) { - final Request request; - request = new Request(db.name(), RequestType.PATCH, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, executor.createDocumentHandle(name, key))); - final DocumentUpdateOptions params = (options != null ? options : new DocumentUpdateOptions()); - request.putQueryParam(ArangoDBConstants.KEEP_NULL, params.getKeepNull()); - request.putQueryParam(ArangoDBConstants.MERGE_OBJECTS, params.getMergeObjects()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.IGNORE_REVS, params.getIgnoreRevs()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value, new ArangoSerializer.Options() - .serializeNullValues(params.getSerializeNull() == null || params.getSerializeNull()))); - return request; - } - - protected ResponseDeserializer> updateDocumentResponseDeserializer(final T value) { - return new ResponseDeserializer>() { - @SuppressWarnings("unchecked") - @Override - public DocumentUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - final DocumentUpdateEntity doc = util().deserialize(body, DocumentUpdateEntity.class); - final VPackSlice newDoc = body.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, value.getClass())); - } - final VPackSlice oldDoc = body.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, value.getClass())); - } - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - 
protected Request updateDocumentsRequest(final Collection values, final DocumentUpdateOptions params) { - final Request request; - request = new Request(db.name(), RequestType.PATCH, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)); - final Boolean keepNull = params.getKeepNull(); - request.putQueryParam(ArangoDBConstants.KEEP_NULL, keepNull); - request.putQueryParam(ArangoDBConstants.MERGE_OBJECTS, params.getMergeObjects()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.IGNORE_REVS, params.getIgnoreRevs()); - request.putQueryParam(ArangoDBConstants.RETURN_NEW, params.getReturnNew()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody( - util().serialize(values, new ArangoSerializer.Options().serializeNullValues(false).stringAsJson(true))); - return request; - } - - @SuppressWarnings("unchecked") - protected ResponseDeserializer>> updateDocumentsResponseDeserializer( - final Collection values, - final DocumentUpdateOptions params) { - return new ResponseDeserializer>>() { - @Override - public MultiDocumentEntity> deserialize(final Response response) - throws VPackException { - Class type = null; - if ((params.getReturnNew() != null && params.getReturnNew()) - || (params.getReturnOld() != null && params.getReturnOld())) { - if (!values.isEmpty()) { - type = (Class) values.iterator().next().getClass(); - } - } - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity>(); - final Collection> docs = new ArrayList>(); - final Collection errors = new ArrayList(); - final Collection documentsAndErrors = new ArrayList(); - final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { - final VPackSlice next = iterator.next(); - if (next.get(ArangoDBConstants.ERROR).isTrue()) { - final 
ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final DocumentUpdateEntity doc = util().deserialize(next, DocumentUpdateEntity.class); - final VPackSlice newDoc = next.get(ArangoDBConstants.NEW); - if (newDoc.isObject()) { - doc.setNew((T) util().deserialize(newDoc, type)); - } - final VPackSlice oldDoc = next.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, type)); - } - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; - } - }; - } - - protected Request deleteDocumentRequest(final String key, final DocumentDeleteOptions options) { - final Request request; - request = new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, executor.createDocumentHandle(name, key))); - final DocumentDeleteOptions params = (options != null ? 
options : new DocumentDeleteOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - - protected ResponseDeserializer> deleteDocumentResponseDeserializer( - final Class type) { - return new ResponseDeserializer>() { - @SuppressWarnings("unchecked") - @Override - public DocumentDeleteEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - final DocumentDeleteEntity doc = util().deserialize(body, DocumentDeleteEntity.class); - final VPackSlice oldDoc = body.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, type)); - } - return doc; - } - }; - } - - protected Request deleteDocumentsRequest(final Collection keys, final DocumentDeleteOptions options) { - final Request request; - request = new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, name)); - final DocumentDeleteOptions params = (options != null ? 
options : new DocumentDeleteOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putQueryParam(ArangoDBConstants.RETURN_OLD, params.getReturnOld()); - request.setBody(util().serialize(keys)); - return request; - } - - protected ResponseDeserializer>> deleteDocumentsResponseDeserializer( - final Class type) { - return new ResponseDeserializer>>() { - @SuppressWarnings("unchecked") - @Override - public MultiDocumentEntity> deserialize(final Response response) - throws VPackException { - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity>(); - final Collection> docs = new ArrayList>(); - final Collection errors = new ArrayList(); - final Collection documentsAndErrors = new ArrayList(); - final VPackSlice body = response.getBody(); - for (final Iterator iterator = body.arrayIterator(); iterator.hasNext();) { - final VPackSlice next = iterator.next(); - if (next.get(ArangoDBConstants.ERROR).isTrue()) { - final ErrorEntity error = (ErrorEntity) util().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final DocumentDeleteEntity doc = util().deserialize(next, DocumentDeleteEntity.class); - final VPackSlice oldDoc = next.get(ArangoDBConstants.OLD); - if (oldDoc.isObject()) { - doc.setOld((T) util().deserialize(oldDoc, type)); - } - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; - } - }; - } - - protected Request documentExistsRequest(final String key, final DocumentExistsOptions options) { - final Request request; - request = new Request(db.name(), RequestType.HEAD, - executor.createPath(ArangoDBConstants.PATH_API_DOCUMENT, executor.createDocumentHandle(name, key))); - final DocumentExistsOptions params = (options != null ? 
options : new DocumentExistsOptions()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.putHeaderParam(ArangoDBConstants.IF_NONE_MATCH, params.getIfNoneMatch()); - return request; - } - - protected Request getIndexRequest(final String id) { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_INDEX, createIndexId(id))); - } - - protected Request deleteIndexRequest(final String id) { - return new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_INDEX, createIndexId(id))); - } - - protected ResponseDeserializer deleteIndexResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public String deserialize(final Response response) throws VPackException { - return response.getBody().get(ArangoDBConstants.ID).getAsString(); - } - }; - } - - private String createIndexId(final String id) { - final String index; - if (id.matches(ArangoExecutor.REGEX_ID)) { - index = id; - } else if (id.matches(ArangoExecutor.REGEX_KEY)) { - index = name + "/" + id; - } else { - throw new ArangoDBException(String.format("index id %s is not valid.", id)); - } - return index; - } - - protected Request createHashIndexRequest(final Iterable fields, final HashIndexOptions options) { - final Request request; - request = new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new HashIndexOptions(), fields))); - return request; - } - - protected Request createSkiplistIndexRequest(final Iterable fields, final SkiplistIndexOptions options) { - final Request request; - request = new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? 
options : new SkiplistIndexOptions(), fields))); - return request; - } - - protected Request createPersistentIndexRequest( - final Iterable fields, - final PersistentIndexOptions options) { - final Request request; - request = new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new PersistentIndexOptions(), fields))); - return request; - } - - protected Request createGeoIndexRequest(final Iterable fields, final GeoIndexOptions options) { - final Request request; - request = new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new GeoIndexOptions(), fields))); - return request; - } - - protected Request createFulltextIndexRequest(final Iterable fields, final FulltextIndexOptions options) { - final Request request; - request = new Request(db.name(), RequestType.POST, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? 
options : new FulltextIndexOptions(), fields))); - return request; - } - - protected Request getIndexesRequest() { - final Request request; - request = new Request(db.name(), RequestType.GET, ArangoDBConstants.PATH_API_INDEX); - request.putQueryParam(ArangoDBConstants.COLLECTION, name); - return request; - } - - protected ResponseDeserializer> getIndexesResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.INDEXES), - new Type>() { - }.getType()); - } - }; - } - - protected Request truncateRequest() { - return new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.TRUNCATE)); - } - - protected Request countRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.COUNT)); - } - - protected Request dropRequest(final Boolean isSystem) { - return new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name)) - .putQueryParam(ArangoDBConstants.IS_SYSTEM, isSystem); - } - - protected Request loadRequest() { - return new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.LOAD)); - } - - protected Request unloadRequest() { - return new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.UNLOAD)); - } - - protected Request getInfoRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name)); - } - - protected Request getPropertiesRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.PROPERTIES)); - } - - 
protected Request changePropertiesRequest(final CollectionPropertiesOptions options) { - final Request request; - request = new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.PROPERTIES)); - request.setBody(util().serialize(options != null ? options : new CollectionPropertiesOptions())); - return request; - } - - protected Request renameRequest(final String newName) { - final Request request; - request = new Request(db.name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.RENAME)); - request.setBody(util().serialize(OptionsBuilder.build(new CollectionRenameOptions(), newName))); - return request; - } - - protected Request getRevisionRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_COLLECTION, name, ArangoDBConstants.REVISION)); - } - - protected Request grantAccessRequest(final String user, final Permissions permissions) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, db.name(), name)) - .setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); - } - - protected Request resetAccessRequest(final String user) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.DELETE, executor - .createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, db.name(), name)); - } - - protected Request getPermissionsRequest(final String user) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.GET, executor - .createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, db.name(), name)); - } - - protected ResponseDeserializer getPermissionsResponseDeserialzer() { - return new ResponseDeserializer() { - @Override - public Permissions deserialize(final Response response) throws VPackException { - final VPackSlice 
body = response.getBody(); - if (body != null) { - final VPackSlice result = body.get(ArangoDBConstants.RESULT); - if (!result.isNone()) { - return util().deserialize(result, Permissions.class); - } - } - return null; - } - }; - } - -} diff --git a/src/main/java/com/arangodb/internal/InternalArangoDB.java b/src/main/java/com/arangodb/internal/InternalArangoDB.java deleted file mode 100644 index d4615ceb8..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoDB.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.Map.Entry; -import java.util.Properties; - -import com.arangodb.ArangoDBException; -import com.arangodb.Protocol; -import com.arangodb.entity.LoadBalancingStrategy; -import com.arangodb.entity.LogLevelEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.UserEntity; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.DBCreateOptions; -import com.arangodb.model.LogOptions; -import com.arangodb.model.OptionsBuilder; -import com.arangodb.model.UserAccessOptions; -import com.arangodb.model.UserCreateOptions; -import com.arangodb.model.UserUpdateOptions; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.Type; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * @param - * @param - * - */ -public class InternalArangoDB extends ArangoExecuteable { - - private static final String PROPERTY_KEY_HOSTS = "arangodb.hosts"; - private static final String PROPERTY_KEY_HOST = "arangodb.host"; - private static final String PROPERTY_KEY_PORT = "arangodb.port"; - private static final String PROPERTY_KEY_TIMEOUT = "arangodb.timeout"; - private static final String PROPERTY_KEY_USER = "arangodb.user"; - private static final String PROPERTY_KEY_PASSWORD = "arangodb.password"; - private static final String PROPERTY_KEY_USE_SSL = "arangodb.usessl"; - private static final String PROPERTY_KEY_V_STREAM_CHUNK_CONTENT_SIZE = "arangodb.chunksize"; - private static final String 
PROPERTY_KEY_MAX_CONNECTIONS = "arangodb.connections.max"; - private static final String PROPERTY_KEY_PROTOCOL = "arangodb.protocol"; - private static final String PROPERTY_KEY_ACQUIRE_HOST_LIST = "arangodb.acquireHostList"; - private static final String PROPERTY_KEY_LOAD_BALANCING_STRATEGY = "arangodb.loadBalancingStrategy"; - protected static final String DEFAULT_PROPERTY_FILE = "/arangodb.properties"; - - public InternalArangoDB(final E executor, final ArangoSerialization util) { - super(executor, util); - } - - protected static void loadHosts(final Properties properties, final Collection hosts) { - final String hostsProp = properties.getProperty(PROPERTY_KEY_HOSTS); - if (hostsProp != null) { - final String[] hostsSplit = hostsProp.split(","); - for (final String host : hostsSplit) { - final String[] split = host.split(":"); - if (split.length != 2 || !split[1].matches("[0-9]+")) { - throw new ArangoDBException(String.format( - "Could not load property-value arangodb.hosts=%s. Expected format ip:port,ip:port,...", - hostsProp)); - } else { - hosts.add(new Host(split[0], Integer.valueOf(split[1]))); - } - } - } - } - - protected static String loadHost(final Properties properties, final String currentValue) { - final String host = getProperty(properties, PROPERTY_KEY_HOST, currentValue, ArangoDBConstants.DEFAULT_HOST); - if (host.contains(":")) { - throw new ArangoDBException(String.format( - "Could not load property-value arangodb.host=%s. Expect only ip. 
Do you mean arangodb.hosts=ip:port ?", - host)); - } - return host; - } - - protected static Integer loadPort(final Properties properties, final int currentValue) { - return Integer - .parseInt(getProperty(properties, PROPERTY_KEY_PORT, currentValue, ArangoDBConstants.DEFAULT_PORT)); - } - - protected static Integer loadTimeout(final Properties properties, final Integer currentValue) { - return Integer.parseInt( - getProperty(properties, PROPERTY_KEY_TIMEOUT, currentValue, ArangoDBConstants.DEFAULT_TIMEOUT)); - } - - protected static String loadUser(final Properties properties, final String currentValue) { - return getProperty(properties, PROPERTY_KEY_USER, currentValue, ArangoDBConstants.DEFAULT_USER); - } - - protected static String loadPassword(final Properties properties, final String currentValue) { - return getProperty(properties, PROPERTY_KEY_PASSWORD, currentValue, null); - } - - protected static Boolean loadUseSsl(final Properties properties, final Boolean currentValue) { - return Boolean.parseBoolean( - getProperty(properties, PROPERTY_KEY_USE_SSL, currentValue, ArangoDBConstants.DEFAULT_USE_SSL)); - } - - protected static Integer loadChunkSize(final Properties properties, final Integer currentValue) { - return Integer.parseInt(getProperty(properties, PROPERTY_KEY_V_STREAM_CHUNK_CONTENT_SIZE, currentValue, - ArangoDBConstants.CHUNK_DEFAULT_CONTENT_SIZE)); - } - - protected static Integer loadMaxConnections(final Properties properties, final Integer currentValue) { - return Integer.parseInt(getProperty(properties, PROPERTY_KEY_MAX_CONNECTIONS, currentValue, - ArangoDBConstants.MAX_CONNECTIONS_VST_DEFAULT)); - } - - protected static Protocol loadProtocol(final Properties properties, final Protocol currentValue) { - return Protocol.valueOf( - getProperty(properties, PROPERTY_KEY_PROTOCOL, currentValue, ArangoDBConstants.DEFAULT_NETWORK_PROTOCOL) - .toUpperCase()); - } - - protected static Boolean loadAcquireHostList(final Properties properties, final Boolean 
currentValue) { - return Boolean.parseBoolean(getProperty(properties, PROPERTY_KEY_ACQUIRE_HOST_LIST, currentValue, - ArangoDBConstants.DEFAULT_ACQUIRE_HOST_LIST)); - } - - protected static LoadBalancingStrategy loadLoadBalancingStrategy( - final Properties properties, - final LoadBalancingStrategy currentValue) { - return LoadBalancingStrategy.valueOf(getProperty(properties, PROPERTY_KEY_LOAD_BALANCING_STRATEGY, currentValue, - ArangoDBConstants.DEFAULT_LOAD_BALANCING_STRATEGY).toUpperCase()); - } - - private static String getProperty( - final Properties properties, - final String key, - final T currentValue, - final T defaultValue) { - return properties.getProperty(key, - currentValue != null ? currentValue.toString() : defaultValue != null ? defaultValue.toString() : null); - } - - protected Request getRoleRequest() { - return new Request(ArangoDBConstants.SYSTEM, RequestType.GET, ArangoDBConstants.PATH_API_ROLE); - } - - protected ResponseDeserializer getRoleResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public ServerRole deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.ROLE), ServerRole.class); - } - }; - } - - protected Request createDatabaseRequest(final String name) { - final Request request = new Request(ArangoDBConstants.SYSTEM, RequestType.POST, - ArangoDBConstants.PATH_API_DATABASE); - request.setBody(util().serialize(OptionsBuilder.build(new DBCreateOptions(), name))); - return request; - } - - protected ResponseDeserializer createDatabaseResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public Boolean deserialize(final Response response) throws VPackException { - return response.getBody().get(ArangoDBConstants.RESULT).getAsBoolean(); - } - }; - } - - protected Request getDatabasesRequest(final String database) { - return new Request(database, RequestType.GET, ArangoDBConstants.PATH_API_DATABASE); - } - - protected 
ResponseDeserializer> getDatabaseResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - final VPackSlice result = response.getBody().get(ArangoDBConstants.RESULT); - return util().deserialize(result, new Type>() { - }.getType()); - } - }; - } - - protected Request getAccessibleDatabasesForRequest(final String database, final String user) { - return new Request(database, RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE)); - } - - protected ResponseDeserializer> getAccessibleDatabasesForResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - final VPackSlice result = response.getBody().get(ArangoDBConstants.RESULT); - final Collection dbs = new ArrayList(); - for (final Iterator> iterator = result.objectIterator(); iterator - .hasNext();) { - dbs.add(iterator.next().getKey()); - } - return dbs; - } - }; - } - - protected Request createUserRequest( - final String database, - final String user, - final String passwd, - final UserCreateOptions options) { - final Request request; - request = new Request(database, RequestType.POST, ArangoDBConstants.PATH_API_USER); - request.setBody( - util().serialize(OptionsBuilder.build(options != null ? 
options : new UserCreateOptions(), user, passwd))); - return request; - } - - protected Request deleteUserRequest(final String database, final String user) { - return new Request(database, RequestType.DELETE, executor.createPath(ArangoDBConstants.PATH_API_USER, user)); - } - - protected Request getUsersRequest(final String database) { - return new Request(database, RequestType.GET, ArangoDBConstants.PATH_API_USER); - } - - protected Request getUserRequest(final String database, final String user) { - return new Request(database, RequestType.GET, executor.createPath(ArangoDBConstants.PATH_API_USER, user)); - } - - protected ResponseDeserializer> getUsersResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - final VPackSlice result = response.getBody().get(ArangoDBConstants.RESULT); - return util().deserialize(result, new Type>() { - }.getType()); - } - }; - } - - protected Request updateUserRequest(final String database, final String user, final UserUpdateOptions options) { - final Request request; - request = new Request(database, RequestType.PATCH, executor.createPath(ArangoDBConstants.PATH_API_USER, user)); - request.setBody(util().serialize(options != null ? options : new UserUpdateOptions())); - return request; - } - - protected Request replaceUserRequest(final String database, final String user, final UserUpdateOptions options) { - final Request request; - request = new Request(database, RequestType.PUT, executor.createPath(ArangoDBConstants.PATH_API_USER, user)); - request.setBody(util().serialize(options != null ? 
options : new UserUpdateOptions())); - return request; - } - - protected Request updateUserDefaultDatabaseAccessRequest(final String user, final Permissions permissions) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, "*")) - .setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); - } - - protected Request updateUserDefaultCollectionAccessRequest(final String user, final Permissions permissions) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, "*", "*")) - .setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); - } - - protected Request getLogsRequest(final LogOptions options) { - final LogOptions params = options != null ? options : new LogOptions(); - return new Request(ArangoDBConstants.SYSTEM, RequestType.GET, ArangoDBConstants.PATH_API_ADMIN_LOG) - .putQueryParam(LogOptions.PROPERTY_UPTO, params.getUpto()) - .putQueryParam(LogOptions.PROPERTY_LEVEL, params.getLevel()) - .putQueryParam(LogOptions.PROPERTY_START, params.getStart()) - .putQueryParam(LogOptions.PROPERTY_SIZE, params.getSize()) - .putQueryParam(LogOptions.PROPERTY_OFFSET, params.getOffset()) - .putQueryParam(LogOptions.PROPERTY_SEARCH, params.getSearch()) - .putQueryParam(LogOptions.PROPERTY_SORT, params.getSort()); - } - - protected Request getLogLevelRequest() { - return new Request(ArangoDBConstants.SYSTEM, RequestType.GET, ArangoDBConstants.PATH_API_ADMIN_LOG_LEVEL); - } - - protected Request setLogLevelRequest(final LogLevelEntity entity) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.PUT, ArangoDBConstants.PATH_API_ADMIN_LOG_LEVEL) - .setBody(util().serialize(entity)); - } - -} diff --git a/src/main/java/com/arangodb/internal/InternalArangoDatabase.java 
b/src/main/java/com/arangodb/internal/InternalArangoDatabase.java deleted file mode 100644 index 2032faf9f..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoDatabase.java +++ /dev/null @@ -1,386 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.Map; - -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.PathEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.AqlFunctionCreateOptions; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlFunctionGetOptions; -import com.arangodb.model.AqlQueryExplainOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.AqlQueryParseOptions; -import com.arangodb.model.CollectionCreateOptions; -import 
com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.OptionsBuilder; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; -import com.arangodb.model.UserAccessOptions; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.Type; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class InternalArangoDatabase, E extends ArangoExecutor, R, C extends VstConnection> - extends ArangoExecuteable { - - private final String name; - private final A arango; - - public InternalArangoDatabase(final A arango, final E executor, final ArangoSerialization util, final String name) { - super(executor, util); - this.arango = arango; - this.name = name; - } - - public A arango() { - return arango; - } - - public String name() { - return name; - } - - protected ResponseDeserializer> getDatabaseResponseDeserializer() { - return arango.getDatabaseResponseDeserializer(); - } - - protected Request getAccessibleDatabasesRequest() { - return new Request(name, RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_DATABASE, ArangoDBConstants.USER)); - } - - protected Request getVersionRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_VERSION); - } - - protected Request createCollectionRequest(final String name, final CollectionCreateOptions options) { - return new Request(name(), RequestType.POST, ArangoDBConstants.PATH_API_COLLECTION).setBody( - util().serialize(OptionsBuilder.build(options != null ? 
options : new CollectionCreateOptions(), name))); - } - - protected Request getCollectionsRequest(final CollectionsReadOptions options) { - final Request request; - request = new Request(name(), RequestType.GET, ArangoDBConstants.PATH_API_COLLECTION); - final CollectionsReadOptions params = (options != null ? options : new CollectionsReadOptions()); - request.putQueryParam(ArangoDBConstants.EXCLUDE_SYSTEM, params.getExcludeSystem()); - return request; - } - - protected ResponseDeserializer> getCollectionsResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - final VPackSlice result = response.getBody().get(ArangoDBConstants.RESULT); - return util().deserialize(result, new Type>() { - }.getType()); - } - }; - } - - protected Request dropRequest() { - return new Request(ArangoDBConstants.SYSTEM, RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_DATABASE, name)); - } - - protected ResponseDeserializer createDropResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public Boolean deserialize(final Response response) throws VPackException { - return response.getBody().get(ArangoDBConstants.RESULT).getAsBoolean(); - } - }; - } - - protected Request grantAccessRequest(final String user, final Permissions permissions) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, name)) - .setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); - } - - protected Request resetAccessRequest(final String user) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, name)); - } - - protected Request updateUserDefaultCollectionAccessRequest(final String user, final Permissions permissions) { - return new 
Request(ArangoDBConstants.SYSTEM, RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, name, "*")) - .setBody(util().serialize(OptionsBuilder.build(new UserAccessOptions(), permissions))); - } - - protected Request getPermissionsRequest(final String user) { - return new Request(ArangoDBConstants.SYSTEM, RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_USER, user, ArangoDBConstants.DATABASE, name)); - } - - protected ResponseDeserializer getPermissionsResponseDeserialzer() { - return new ResponseDeserializer() { - @Override - public Permissions deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - if (body != null) { - final VPackSlice result = body.get(ArangoDBConstants.RESULT); - if (!result.isNone()) { - return util().deserialize(result, Permissions.class); - } - } - return null; - } - }; - } - - protected Request queryRequest( - final String query, - final Map bindVars, - final AqlQueryOptions options) { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_CURSOR).setBody( - util().serialize(OptionsBuilder.build(options != null ? options : new AqlQueryOptions(), query, bindVars))); - } - - protected Request queryNextRequest(final String id) { - return new Request(name, RequestType.PUT, executor.createPath(ArangoDBConstants.PATH_API_CURSOR, id)); - } - - protected Request queryCloseRequest(final String id) { - return new Request(name, RequestType.DELETE, executor.createPath(ArangoDBConstants.PATH_API_CURSOR, id)); - } - - protected Request explainQueryRequest( - final String query, - final Map bindVars, - final AqlQueryExplainOptions options) { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_EXPLAIN).setBody(util().serialize( - OptionsBuilder.build(options != null ? 
options : new AqlQueryExplainOptions(), query, bindVars))); - } - - protected Request parseQueryRequest(final String query) { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_QUERY) - .setBody(util().serialize(OptionsBuilder.build(new AqlQueryParseOptions(), query))); - } - - protected Request clearQueryCacheRequest() { - return new Request(name, RequestType.DELETE, ArangoDBConstants.PATH_API_QUERY_CACHE); - } - - protected Request getQueryCachePropertiesRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_QUERY_CACHE_PROPERTIES); - } - - protected Request setQueryCachePropertiesRequest(final QueryCachePropertiesEntity properties) { - return new Request(name, RequestType.PUT, ArangoDBConstants.PATH_API_QUERY_CACHE_PROPERTIES) - .setBody(util().serialize(properties)); - } - - protected Request getQueryTrackingPropertiesRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_QUERY_PROPERTIES); - } - - protected Request setQueryTrackingPropertiesRequest(final QueryTrackingPropertiesEntity properties) { - return new Request(name, RequestType.PUT, ArangoDBConstants.PATH_API_QUERY_PROPERTIES) - .setBody(util().serialize(properties)); - } - - protected Request getCurrentlyRunningQueriesRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_QUERY_CURRENT); - } - - protected Request getSlowQueriesRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_QUERY_SLOW); - } - - protected Request clearSlowQueriesRequest() { - return new Request(name, RequestType.DELETE, ArangoDBConstants.PATH_API_QUERY_SLOW); - } - - protected Request killQueryRequest(final String id) { - return new Request(name, RequestType.DELETE, executor.createPath(ArangoDBConstants.PATH_API_QUERY, id)); - } - - protected Request createAqlFunctionRequest( - final String name, - final String code, - final AqlFunctionCreateOptions options) { - return new Request(name(), 
RequestType.POST, ArangoDBConstants.PATH_API_AQLFUNCTION).setBody(util().serialize( - OptionsBuilder.build(options != null ? options : new AqlFunctionCreateOptions(), name, code))); - } - - protected Request deleteAqlFunctionRequest(final String name, final AqlFunctionDeleteOptions options) { - final Request request = new Request(name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_AQLFUNCTION, name)); - final AqlFunctionDeleteOptions params = options != null ? options : new AqlFunctionDeleteOptions(); - request.putQueryParam(ArangoDBConstants.GROUP, params.getGroup()); - return request; - } - - protected Request getAqlFunctionsRequest(final AqlFunctionGetOptions options) { - final Request request = new Request(name(), RequestType.GET, ArangoDBConstants.PATH_API_AQLFUNCTION); - final AqlFunctionGetOptions params = options != null ? options : new AqlFunctionGetOptions(); - request.putQueryParam(ArangoDBConstants.NAMESPACE, params.getNamespace()); - return request; - } - - protected Request createGraphRequest( - final String name, - final Collection edgeDefinitions, - final GraphCreateOptions options) { - return new Request(name(), RequestType.POST, ArangoDBConstants.PATH_API_GHARIAL).setBody(util().serialize( - OptionsBuilder.build(options != null ? 
options : new GraphCreateOptions(), name, edgeDefinitions))); - } - - protected ResponseDeserializer createGraphResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public GraphEntity deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.GRAPH), GraphEntity.class); - } - }; - } - - protected Request getGraphsRequest() { - return new Request(name, RequestType.GET, ArangoDBConstants.PATH_API_GHARIAL); - } - - protected ResponseDeserializer> getGraphsResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.GRAPHS), - new Type>() { - }.getType()); - } - }; - } - - protected Request transactionRequest(final String action, final TransactionOptions options) { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_TRANSACTION).setBody( - util().serialize(OptionsBuilder.build(options != null ? 
options : new TransactionOptions(), action))); - } - - protected ResponseDeserializer transactionResponseDeserializer(final Class type) { - return new ResponseDeserializer() { - @Override - public T deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody(); - if (body != null) { - final VPackSlice result = body.get(ArangoDBConstants.RESULT); - if (!result.isNone() && !result.isNull()) { - return util().deserialize(result, type); - } - } - return null; - } - }; - } - - protected Request getInfoRequest() { - return new Request(name, RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_DATABASE, ArangoDBConstants.CURRENT)); - } - - protected ResponseDeserializer getInfoResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public DatabaseEntity deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.RESULT), DatabaseEntity.class); - } - }; - } - - protected Request executeTraversalRequest(final TraversalOptions options) { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_TRAVERSAL) - .setBody(util().serialize(options != null ? 
options : new TransactionOptions())); - } - - @SuppressWarnings("hiding") - protected ResponseDeserializer> executeTraversalResponseDeserializer( - final Class vertexClass, - final Class edgeClass) { - return new ResponseDeserializer>() { - @Override - public TraversalEntity deserialize(final Response response) throws VPackException { - final TraversalEntity result = new TraversalEntity(); - final VPackSlice visited = response.getBody().get(ArangoDBConstants.RESULT) - .get(ArangoDBConstants.VISITED); - result.setVertices(deserializeVertices(vertexClass, visited)); - - final Collection> paths = new ArrayList>(); - for (final Iterator iterator = visited.get("paths").arrayIterator(); iterator.hasNext();) { - final PathEntity path = new PathEntity(); - final VPackSlice next = iterator.next(); - path.setEdges(deserializeEdges(edgeClass, next)); - path.setVertices(deserializeVertices(vertexClass, next)); - paths.add(path); - } - result.setPaths(paths); - return result; - } - }; - } - - @SuppressWarnings("unchecked") - protected Collection deserializeVertices(final Class vertexClass, final VPackSlice vpack) - throws VPackException { - final Collection vertices = new ArrayList(); - for (final Iterator iterator = vpack.get(ArangoDBConstants.VERTICES).arrayIterator(); iterator - .hasNext();) { - vertices.add((V) util().deserialize(iterator.next(), vertexClass)); - } - return vertices; - } - - @SuppressWarnings({ "hiding", "unchecked" }) - protected Collection deserializeEdges(final Class edgeClass, final VPackSlice next) - throws VPackException { - final Collection edges = new ArrayList(); - for (final Iterator iteratorEdge = next.get(ArangoDBConstants.EDGES).arrayIterator(); iteratorEdge - .hasNext();) { - edges.add((E) util().deserialize(iteratorEdge.next(), edgeClass)); - } - return edges; - } - - protected Request reloadRoutingRequest() { - return new Request(name, RequestType.POST, ArangoDBConstants.PATH_API_ADMIN_ROUTING_RELOAD); - } -} diff --git 
a/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java b/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java deleted file mode 100644 index fb1f748b2..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoEdgeCollection.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.HashMap; -import java.util.Map; - -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.EdgeEntity; -import com.arangodb.entity.EdgeUpdateEntity; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.EdgeCreateOptions; -import com.arangodb.model.EdgeDeleteOptions; -import com.arangodb.model.EdgeReplaceOptions; -import com.arangodb.model.EdgeUpdateOptions; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class InternalArangoEdgeCollection, D extends InternalArangoDatabase, G 
extends InternalArangoGraph, E extends ArangoExecutor, R, C extends VstConnection> - extends ArangoExecuteable { - - private final G graph; - private final String name; - - public InternalArangoEdgeCollection(final G graph, final String name) { - super(graph.executor(), graph.util()); - this.graph = graph; - this.name = name; - } - - public G graph() { - return graph; - } - - public String name() { - return name; - } - - protected Request insertEdgeRequest(final T value, final EdgeCreateOptions options) { - final Request request = new Request(graph.db().name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.EDGE, name)); - final EdgeCreateOptions params = (options != null ? options : new EdgeCreateOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer insertEdgeResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public EdgeEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.EDGE); - final EdgeEntity doc = util().deserialize(body, EdgeEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.ID, doc.getId()); - values.put(DocumentField.Type.KEY, doc.getKey()); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request getEdgeRequest(final String key, final DocumentReadOptions options) { - final Request request = new Request(graph.db().name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.EDGE, - executor.createDocumentHandle(name, key))); - final DocumentReadOptions params = (options != null ? 
options : new DocumentReadOptions()); - request.putHeaderParam(ArangoDBConstants.IF_NONE_MATCH, params.getIfNoneMatch()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - - protected ResponseDeserializer getEdgeResponseDeserializer(final Class type) { - return new ResponseDeserializer() { - @Override - public T deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.EDGE), type); - } - }; - } - - protected Request replaceEdgeRequest(final String key, final T value, final EdgeReplaceOptions options) { - final Request request = new Request(graph.db().name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.EDGE, - executor.createDocumentHandle(name, key))); - final EdgeReplaceOptions params = (options != null ? options : new EdgeReplaceOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer replaceEdgeResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public EdgeUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.EDGE); - final EdgeUpdateEntity doc = util().deserialize(body, EdgeUpdateEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request updateEdgeRequest(final String key, final T value, final EdgeUpdateOptions options) { - final Request request; - request = new Request(graph.db().name(), RequestType.PATCH, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.EDGE, - 
executor.createDocumentHandle(name, key))); - final EdgeUpdateOptions params = (options != null ? options : new EdgeUpdateOptions()); - request.putQueryParam(ArangoDBConstants.KEEP_NULL, params.getKeepNull()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value, new ArangoSerializer.Options().serializeNullValues(true))); - return request; - } - - protected ResponseDeserializer updateEdgeResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public EdgeUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.EDGE); - final EdgeUpdateEntity doc = util().deserialize(body, EdgeUpdateEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request deleteEdgeRequest(final String key, final EdgeDeleteOptions options) { - final Request request = new Request(graph.db().name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.EDGE, - executor.createDocumentHandle(name, key))); - final EdgeDeleteOptions params = (options != null ? 
options : new EdgeDeleteOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - -} diff --git a/src/main/java/com/arangodb/internal/InternalArangoGraph.java b/src/main/java/com/arangodb/internal/InternalArangoGraph.java deleted file mode 100644 index 355d63acc..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoGraph.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.Collection; - -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.OptionsBuilder; -import com.arangodb.model.VertexCollectionCreateOptions; -import com.arangodb.velocypack.Type; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class InternalArangoGraph, D extends InternalArangoDatabase, E extends ArangoExecutor, R, C extends VstConnection> - extends ArangoExecuteable { - - private final D db; - private final String name; - - public InternalArangoGraph(final D db, final String name) { - super(db.executor(), db.util()); - this.db = db; - this.name = name; - } - - public D db() { - return db; - } - - public String name() { - return name; - } - - protected Request dropRequest() { - return new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name)); - } - - protected Request getInfoRequest() { - return new Request(db.name(), RequestType.GET, executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name)); - } - - protected ResponseDeserializer getInfoResponseDeserializer() { - return addVertexCollectionResponseDeserializer(); - } - - protected Request getVertexCollectionsRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name, ArangoDBConstants.VERTEX)); - } - - protected ResponseDeserializer> getVertexCollectionsResponseDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws 
VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.COLLECTIONS), - new Type>() { - }.getType()); - } - }; - } - - protected Request addVertexCollectionRequest(final String name) { - final Request request = new Request(db.name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name(), ArangoDBConstants.VERTEX)); - request.setBody(util().serialize(OptionsBuilder.build(new VertexCollectionCreateOptions(), name))); - return request; - } - - protected ResponseDeserializer addVertexCollectionResponseDeserializer() { - return addEdgeDefinitionResponseDeserializer(); - } - - protected Request getEdgeDefinitionsRequest() { - return new Request(db.name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name, ArangoDBConstants.EDGE)); - } - - protected ResponseDeserializer> getEdgeDefinitionsDeserializer() { - return new ResponseDeserializer>() { - @Override - public Collection deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.COLLECTIONS), - new Type>() { - }.getType()); - } - }; - } - - protected Request addEdgeDefinitionRequest(final EdgeDefinition definition) { - final Request request = new Request(db.name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name, ArangoDBConstants.EDGE)); - request.setBody(util().serialize(definition)); - return request; - } - - protected ResponseDeserializer addEdgeDefinitionResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public GraphEntity deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.GRAPH), GraphEntity.class); - } - }; - } - - protected Request replaceEdgeDefinitionRequest(final EdgeDefinition definition) { - final Request request = new Request(db.name(), RequestType.PUT, executor.createPath( - ArangoDBConstants.PATH_API_GHARIAL, 
name, ArangoDBConstants.EDGE, definition.getCollection())); - request.setBody(util().serialize(definition)); - return request; - } - - protected ResponseDeserializer replaceEdgeDefinitionResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public GraphEntity deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.GRAPH), GraphEntity.class); - } - }; - } - - protected Request removeEdgeDefinitionRequest(final String definitionName) { - return new Request(db.name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, name, ArangoDBConstants.EDGE, definitionName)); - } - - protected ResponseDeserializer removeEdgeDefinitionResponseDeserializer() { - return new ResponseDeserializer() { - @Override - public GraphEntity deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.GRAPH), GraphEntity.class); - } - }; - } - -} diff --git a/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java b/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java deleted file mode 100644 index 735d1aed1..000000000 --- a/src/main/java/com/arangodb/internal/InternalArangoVertexCollection.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import java.util.HashMap; -import java.util.Map; - -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.VertexEntity; -import com.arangodb.entity.VertexUpdateEntity; -import com.arangodb.internal.ArangoExecutor.ResponseDeserializer; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.VertexCreateOptions; -import com.arangodb.model.VertexDeleteOptions; -import com.arangodb.model.VertexReplaceOptions; -import com.arangodb.model.VertexUpdateOptions; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class InternalArangoVertexCollection, D extends InternalArangoDatabase, G extends InternalArangoGraph, E extends ArangoExecutor, R, C extends VstConnection> - extends ArangoExecuteable { - - private final G graph; - private final String name; - - public InternalArangoVertexCollection(final G graph, final String name) { - super(graph.executor(), graph.util()); - this.graph = graph; - this.name = name; - } - - public G graph() { - return graph; - } - - public String name() { - return name; - } - - protected Request dropRequest() { - return new Request(graph.db().name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.VERTEX, name)); - } - - protected Request insertVertexRequest(final T value, final VertexCreateOptions options) { - final Request request = new Request(graph.db().name(), RequestType.POST, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.VERTEX, name)); - final VertexCreateOptions 
params = (options != null ? options : new VertexCreateOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer insertVertexResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public VertexEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.VERTEX); - final VertexEntity doc = util().deserialize(body, VertexEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.ID, doc.getId()); - values.put(DocumentField.Type.KEY, doc.getKey()); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request getVertexRequest(final String key, final DocumentReadOptions options) { - final Request request = new Request(graph.db().name(), RequestType.GET, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.VERTEX, - executor.createDocumentHandle(name, key))); - final DocumentReadOptions params = (options != null ? 
options : new DocumentReadOptions()); - request.putHeaderParam(ArangoDBConstants.IF_NONE_MATCH, params.getIfNoneMatch()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - - protected ResponseDeserializer getVertexResponseDeserializer(final Class type) { - return new ResponseDeserializer() { - @Override - public T deserialize(final Response response) throws VPackException { - return util().deserialize(response.getBody().get(ArangoDBConstants.VERTEX), type); - } - }; - } - - protected Request replaceVertexRequest(final String key, final T value, final VertexReplaceOptions options) { - final Request request = new Request(graph.db().name(), RequestType.PUT, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.VERTEX, - executor.createDocumentHandle(name, key))); - final VertexReplaceOptions params = (options != null ? options : new VertexReplaceOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value)); - return request; - } - - protected ResponseDeserializer replaceVertexResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public VertexUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.VERTEX); - final VertexUpdateEntity doc = util().deserialize(body, VertexUpdateEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request updateVertexRequest(final String key, final T value, final VertexUpdateOptions options) { - final Request request; - request = new Request(graph.db().name(), RequestType.PATCH, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, 
graph.name(), ArangoDBConstants.VERTEX, - executor.createDocumentHandle(name, key))); - final VertexUpdateOptions params = (options != null ? options : new VertexUpdateOptions()); - request.putQueryParam(ArangoDBConstants.KEEP_NULL, params.getKeepNull()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - request.setBody(util().serialize(value, new ArangoSerializer.Options().serializeNullValues(true))); - return request; - } - - protected ResponseDeserializer updateVertexResponseDeserializer(final T value) { - return new ResponseDeserializer() { - @Override - public VertexUpdateEntity deserialize(final Response response) throws VPackException { - final VPackSlice body = response.getBody().get(ArangoDBConstants.VERTEX); - final VertexUpdateEntity doc = util().deserialize(body, VertexUpdateEntity.class); - final Map values = new HashMap(); - values.put(DocumentField.Type.REV, doc.getRev()); - executor.documentCache().setValues(value, values); - return doc; - } - }; - } - - protected Request deleteVertexRequest(final String key, final VertexDeleteOptions options) { - final Request request = new Request(graph.db().name(), RequestType.DELETE, - executor.createPath(ArangoDBConstants.PATH_API_GHARIAL, graph.name(), ArangoDBConstants.VERTEX, - executor.createDocumentHandle(name, key))); - final VertexDeleteOptions params = (options != null ? 
options : new VertexDeleteOptions()); - request.putQueryParam(ArangoDBConstants.WAIT_FOR_SYNC, params.getWaitForSync()); - request.putHeaderParam(ArangoDBConstants.IF_MATCH, params.getIfMatch()); - return request; - } - -} diff --git a/src/main/java/com/arangodb/internal/http/HttpCommunication.java b/src/main/java/com/arangodb/internal/http/HttpCommunication.java deleted file mode 100644 index fafa9945e..000000000 --- a/src/main/java/com/arangodb/internal/http/HttpCommunication.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.http; - -import java.io.IOException; - -import javax.net.ssl.SSLContext; - -import com.arangodb.ArangoDBException; -import com.arangodb.Protocol; -import com.arangodb.internal.ArangoDBConstants; -import com.arangodb.internal.Host; -import com.arangodb.internal.net.ArangoDBRedirectException; -import com.arangodb.internal.net.ConnectionPool; -import com.arangodb.internal.net.DelHostHandler; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.internal.util.HostUtils; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class HttpCommunication { - - public static class Builder { - - private final HostHandler hostHandler; - private final Protocol protocol; - private Integer timeout; - private String user; - private String password; - private Boolean useSsl; - private SSLContext sslContext; - private Integer maxConnections; - - public Builder(final HostHandler hostHandler, final Protocol protocol) { - super(); - this.hostHandler = hostHandler; - this.protocol = protocol; - } - - public Builder(final Builder builder) { - this(builder.hostHandler, builder.protocol); - timeout(builder.timeout).user(builder.user).password(builder.password).useSsl(builder.useSsl) - .sslContext(builder.sslContext).maxConnections(builder.maxConnections); - } - - public Builder timeout(final Integer timeout) { - this.timeout = timeout; - return this; - } - - public Builder user(final String user) { - this.user = user; - return this; - } - - public Builder password(final String password) { - this.password = password; - return this; - } - - public Builder useSsl(final Boolean useSsl) { - this.useSsl = useSsl; - return this; - } - - public Builder sslContext(final SSLContext sslContext) { - this.sslContext = 
sslContext; - return this; - } - - public Builder maxConnections(final Integer maxConnections) { - this.maxConnections = maxConnections; - return this; - } - - public HttpCommunication build(final ArangoSerialization util) { - return new HttpCommunication(timeout, user, password, useSsl, sslContext, util, hostHandler, maxConnections, - protocol); - } - } - - private final ConnectionPool connectionPool; - - private HttpCommunication(final Integer timeout, final String user, final String password, final Boolean useSsl, - final SSLContext sslContext, final ArangoSerialization util, final HostHandler hostHandler, - final Integer maxConnections, final Protocol contentType) { - super(); - connectionPool = new ConnectionPool( - maxConnections != null ? Math.max(1, maxConnections) : ArangoDBConstants.MAX_CONNECTIONS_HTTP_DEFAULT) { - @Override - public HttpConnection createConnection(final Host host) { - return new HttpConnection(timeout, user, password, useSsl, sslContext, util, - new DelHostHandler(hostHandler, host), contentType); - } - }; - } - - public void disconnect() throws IOException { - connectionPool.disconnect(); - } - - public Response execute(final Request request, final HostHandle hostHandle) throws ArangoDBException, IOException { - final HttpConnection connection = connectionPool.connection(hostHandle); - try { - return execute(request, connection); - } catch (final ArangoDBException e) { - if (e instanceof ArangoDBRedirectException) { - final String location = ArangoDBRedirectException.class.cast(e).getLocation(); - final Host host = HostUtils.createFromLocation(location); - connectionPool.closeConnectionOnError(connection); - return execute(request, new HostHandle().setHost(host)); - } else { - throw e; - } - } - } - - protected Response execute(final Request request, final HttpConnection connection) - throws ArangoDBException, IOException { - return connection.execute(request); - } - -} diff --git 
a/src/main/java/com/arangodb/internal/http/HttpConnection.java b/src/main/java/com/arangodb/internal/http/HttpConnection.java deleted file mode 100644 index bea703dd7..000000000 --- a/src/main/java/com/arangodb/internal/http/HttpConnection.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.http; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.SocketException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import javax.net.ssl.SSLContext; - -import org.apache.http.Header; -import org.apache.http.HeaderElement; -import org.apache.http.HeaderElementIterator; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.NameValuePair; -import org.apache.http.auth.AuthenticationException; -import org.apache.http.auth.Credentials; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import 
org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.config.RegistryBuilder; -import org.apache.http.conn.ConnectionKeepAliveStrategy; -import org.apache.http.conn.socket.ConnectionSocketFactory; -import org.apache.http.conn.socket.PlainConnectionSocketFactory; -import org.apache.http.conn.ssl.SSLConnectionSocketFactory; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.DefaultHttpRequestRetryHandler; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.apache.http.message.BasicHeaderElementIterator; -import org.apache.http.message.BasicNameValuePair; -import org.apache.http.protocol.HTTP; -import org.apache.http.protocol.HttpContext; -import org.apache.http.ssl.SSLContexts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; -import com.arangodb.Protocol; -import com.arangodb.internal.Host; -import com.arangodb.internal.net.Connection; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.internal.util.CURLLogger; -import com.arangodb.internal.util.IOUtils; -import com.arangodb.internal.util.ResponseUtils; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.util.ArangoSerializer.Options; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class HttpConnection implements Connection { - - private static final Logger LOGGER = 
LoggerFactory.getLogger(HttpCommunication.class); - private static final ContentType CONTENT_TYPE_APPLICATION_JSON_UTF8 = ContentType.create("application/json", - "utf-8"); - private static final ContentType CONTENT_TYPE_VPACK = ContentType.create("application/x-velocypack"); - private final PoolingHttpClientConnectionManager cm; - private final CloseableHttpClient client; - private final String user; - private final String password; - private final ArangoSerialization util; - private final HostHandler hostHandler; - private final Boolean useSsl; - private final Protocol contentType; - private Host host; - - public HttpConnection(final Integer timeout, final String user, final String password, final Boolean useSsl, - final SSLContext sslContext, final ArangoSerialization util, final HostHandler hostHandler, - final Protocol contentType) { - super(); - this.user = user; - this.password = password; - this.useSsl = useSsl; - this.util = util; - this.hostHandler = hostHandler; - this.contentType = contentType; - final RegistryBuilder registryBuilder = RegistryBuilder - . 
create(); - if (useSsl != null && useSsl) { - if (sslContext != null) { - registryBuilder.register("https", new SSLConnectionSocketFactory(sslContext)); - } else { - registryBuilder.register("https", new SSLConnectionSocketFactory(SSLContexts.createSystemDefault())); - } - } else { - registryBuilder.register("http", new PlainConnectionSocketFactory()); - } - cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); - cm.setDefaultMaxPerRoute(1); - cm.setMaxTotal(1); - final RequestConfig.Builder requestConfig = RequestConfig.custom(); - if (timeout != null && timeout >= 0) { - requestConfig.setConnectTimeout(timeout); - requestConfig.setConnectionRequestTimeout(timeout); - requestConfig.setSocketTimeout(timeout); - } - final ConnectionKeepAliveStrategy keepAliveStrategy = new ConnectionKeepAliveStrategy() { - @Override - public long getKeepAliveDuration(final HttpResponse response, final HttpContext context) { - return HttpConnection.this.getKeepAliveDuration(response); - } - }; - final HttpClientBuilder builder = HttpClientBuilder.create().setDefaultRequestConfig(requestConfig.build()) - .setConnectionManager(cm).setKeepAliveStrategy(keepAliveStrategy) - .setRetryHandler(new DefaultHttpRequestRetryHandler()); - client = builder.build(); - } - - @Override - public Host getHost() { - return host; - } - - private long getKeepAliveDuration(final HttpResponse response) { - final HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); - while (it.hasNext()) { - final HeaderElement he = it.nextElement(); - final String param = he.getName(); - final String value = he.getValue(); - if (value != null && "timeout".equalsIgnoreCase(param)) { - try { - return Long.parseLong(value) * 1000L; - } catch (final NumberFormatException ignore) { - } - } - } - return 30L * 1000L; - } - - @Override - public void close() throws IOException { - cm.shutdown(); - client.close(); - } - - @Override - public void closeOnError() 
throws IOException { - hostHandler.fail(); - close(); - } - - public Response execute(final Request request) throws ArangoDBException, IOException { - host = hostHandler.get(); - while (true) { - if (host == null) { - throw new ArangoDBException("Was not able to connect to any host"); - } - try { - final String url = buildUrl(buildBaseUrl(host), request); - final HttpRequestBase httpRequest = buildHttpRequestBase(request, url); - httpRequest.setHeader("User-Agent", - "Mozilla/5.0 (compatible; ArangoDB-JavaDriver/1.1; +http://mt.orz.at/)"); - if (contentType == Protocol.HTTP_VPACK) { - httpRequest.setHeader("Accept", "application/x-velocypack"); - } - addHeader(request, httpRequest); - final Credentials credentials = addCredentials(httpRequest); - if (LOGGER.isDebugEnabled()) { - CURLLogger.log(url, request, credentials, util); - } - Response response; - response = buildResponse(client.execute(httpRequest)); - checkError(response); - hostHandler.success(); - return response; - } catch (final SocketException e) { - hostHandler.fail(); - final Host failedHost = host; - host = hostHandler.get(); - if (host != null) { - LOGGER.warn(String.format("Could not connect to %s. 
Try connecting to %s", failedHost, host)); - } else { - throw e; - } - } - } - } - - private HttpRequestBase buildHttpRequestBase(final Request request, final String url) { - final HttpRequestBase httpRequest; - switch (request.getRequestType()) { - case POST: - httpRequest = requestWithBody(new HttpPost(url), request); - break; - case PUT: - httpRequest = requestWithBody(new HttpPut(url), request); - break; - case PATCH: - httpRequest = requestWithBody(new HttpPatch(url), request); - break; - case DELETE: - httpRequest = requestWithBody(new HttpDeleteWithBody(url), request); - break; - case HEAD: - httpRequest = new HttpHead(url); - break; - case GET: - default: - httpRequest = new HttpGet(url); - break; - } - return httpRequest; - } - - private HttpRequestBase requestWithBody(final HttpEntityEnclosingRequestBase httpRequest, final Request request) { - final VPackSlice body = request.getBody(); - if (body != null) { - if (contentType == Protocol.HTTP_VPACK) { - httpRequest.setEntity(new ByteArrayEntity( - Arrays.copyOfRange(body.getBuffer(), body.getStart(), body.getStart() + body.getByteSize()), - CONTENT_TYPE_VPACK)); - } else { - httpRequest.setEntity(new StringEntity(body.toString(), CONTENT_TYPE_APPLICATION_JSON_UTF8)); - } - } - return httpRequest; - } - - private String buildBaseUrl(final Host host) { - return (useSsl != null && useSsl ? 
"https://" : "http://") + host.getHost() + ":" + host.getPort(); - } - - private static String buildUrl(final String baseUrl, final Request request) throws UnsupportedEncodingException { - final StringBuilder sb = new StringBuilder().append(baseUrl); - final String database = request.getDatabase(); - if (database != null && !database.isEmpty()) { - sb.append("/_db/").append(database); - } - sb.append(request.getRequest()); - if (!request.getQueryParam().isEmpty()) { - if (request.getRequest().contains("?")) { - sb.append("&"); - } else { - sb.append("?"); - } - final String paramString = URLEncodedUtils.format(toList(request.getQueryParam()), "utf-8"); - sb.append(paramString); - } - return sb.toString(); - } - - private static List toList(final Map parameters) { - final ArrayList paramList = new ArrayList(parameters.size()); - for (final Entry param : parameters.entrySet()) { - if (param.getValue() != null) { - paramList.add(new BasicNameValuePair(param.getKey(), param.getValue().toString())); - } - } - return paramList; - } - - private static void addHeader(final Request request, final HttpRequestBase httpRequest) { - for (final Entry header : request.getHeaderParam().entrySet()) { - httpRequest.addHeader(header.getKey(), header.getValue()); - } - } - - public Credentials addCredentials(final HttpRequestBase httpRequest) { - Credentials credentials = null; - if (user != null) { - credentials = new UsernamePasswordCredentials(user, password != null ? 
password : ""); - try { - httpRequest.addHeader(new BasicScheme().authenticate(credentials, httpRequest, null)); - } catch (final AuthenticationException e) { - throw new ArangoDBException(e); - } - } - return credentials; - } - - public Response buildResponse(final CloseableHttpResponse httpResponse) - throws UnsupportedOperationException, IOException { - final Response response = new Response(); - response.setResponseCode(httpResponse.getStatusLine().getStatusCode()); - final HttpEntity entity = httpResponse.getEntity(); - if (entity != null && entity.getContent() != null) { - if (contentType == Protocol.HTTP_VPACK) { - final byte[] content = IOUtils.toByteArray(entity.getContent()); - if (content.length > 0) { - response.setBody(new VPackSlice(content)); - } - } else { - final String content = IOUtils.toString(entity.getContent()); - if (!content.isEmpty()) { - response.setBody( - util.serialize(content, new Options().stringAsJson(true).serializeNullValues(true))); - } - } - } - final Header[] headers = httpResponse.getAllHeaders(); - final Map meta = response.getMeta(); - for (final Header header : headers) { - meta.put(header.getName(), header.getValue()); - } - return response; - } - - protected void checkError(final Response response) throws ArangoDBException { - ResponseUtils.checkError(util, response); - } - -} diff --git a/src/main/java/com/arangodb/internal/http/HttpDeleteWithBody.java b/src/main/java/com/arangodb/internal/http/HttpDeleteWithBody.java deleted file mode 100644 index 483a21925..000000000 --- a/src/main/java/com/arangodb/internal/http/HttpDeleteWithBody.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.http; - -import java.net.URI; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -/** - * @author Mark Vollmary - * - */ -public class HttpDeleteWithBody extends HttpEntityEnclosingRequestBase { - public final static String METHOD_NAME = "DELETE"; - - public HttpDeleteWithBody(final String uri) { - super(); - setURI(URI.create(uri)); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } - -} diff --git a/src/main/java/com/arangodb/internal/net/ConnectionPool.java b/src/main/java/com/arangodb/internal/net/ConnectionPool.java deleted file mode 100644 index a1af33972..000000000 --- a/src/main/java/com/arangodb/internal/net/ConnectionPool.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.io.IOException; -import java.util.LinkedList; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public abstract class ConnectionPool { - - private final LinkedList connections; - private final int maxConnections; - - public ConnectionPool(final Integer maxConnections) { - super(); - this.maxConnections = maxConnections; - connections = new LinkedList(); - } - - public abstract C createConnection(final Host host); - - public synchronized C connection(final HostHandle hostHandle) { - final C c; - if (hostHandle == null || hostHandle.getHost() == null) { - if (connections.size() < maxConnections) { - c = createConnection(null); - } else { - c = connections.removeFirst(); - } - if (hostHandle != null) { - hostHandle.setHost(c.getHost()); - } - } else { - final Host host = hostHandle.getHost(); - C tmp = null; - for (final C connection : connections) { - if (connection.getHost().equals(host)) { - tmp = connection; - connections.remove(tmp); - break; - } - } - c = tmp != null ? 
tmp : createConnection(host); - } - connections.add(c); - return c; - } - - public void disconnect() throws IOException { - while (!connections.isEmpty()) { - connections.removeLast().close(); - } - } - - public void closeConnection(final C connection) { - try { - connection.close(); - connections.remove(connection); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - - public void closeConnectionOnError(final C connection) { - try { - connection.closeOnError(); - connections.remove(connection); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } -} diff --git a/src/main/java/com/arangodb/internal/net/DelHostHandler.java b/src/main/java/com/arangodb/internal/net/DelHostHandler.java deleted file mode 100644 index 6b8c40bb8..000000000 --- a/src/main/java/com/arangodb/internal/net/DelHostHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class DelHostHandler implements HostHandler { - - private final HostHandler hostHandler; - private Host host; - - public DelHostHandler(final HostHandler hostHandler, final Host host) { - super(); - this.hostHandler = hostHandler; - this.host = host; - } - - @Override - public Host get() { - return host != null ? host : hostHandler.get(); - } - - @Override - public void success() { - if (host == null) { - hostHandler.success(); - } - } - - @Override - public void fail() { - if (host == null) { - hostHandler.fail(); - } else { - host = null; - } - } - -} diff --git a/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java b/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java deleted file mode 100644 index 02acad2cb..000000000 --- a/src/main/java/com/arangodb/internal/net/ExtendedHostResolver.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class ExtendedHostResolver implements HostResolver { - - private static final long MAX_CACHE_TIME = 60 * 60 * 1000; - private EndpointResolver resolver; - private final List hosts; - private long lastUpdate; - - public ExtendedHostResolver(final List hosts) { - super(); - this.hosts = new ArrayList(hosts); - lastUpdate = 0; - } - - @Override - public void init(final EndpointResolver resolver) { - this.resolver = resolver; - } - - @Override - public List resolve(final boolean initial, final boolean closeConnections) { - if (!initial && isExpired()) { - lastUpdate = System.currentTimeMillis(); - final Collection endpoints = resolver.resolve(closeConnections); - if (!endpoints.isEmpty()) { - hosts.clear(); - } - for (final String endpoint : endpoints) { - if (endpoint.matches(".*://.+:[0-9]+")) { - final String[] s = endpoint.replaceAll(".*://", "").split(":"); - if (s.length == 2) { - hosts.add(new Host(s[0], Integer.valueOf(s[1]))); - } - } - } - } - return hosts; - } - - private boolean isExpired() { - return System.currentTimeMillis() > lastUpdate + MAX_CACHE_TIME; - } -} diff --git a/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java b/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java deleted file mode 100644 index d2b3e23e7..000000000 --- a/src/main/java/com/arangodb/internal/net/FallbackHostHandler.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.List; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class FallbackHostHandler implements HostHandler { - - private Host current; - private Host lastSuccess; - private int iterations; - private final HostResolver resolver; - - public FallbackHostHandler(final HostResolver resolver) { - this.resolver = resolver; - iterations = 0; - current = lastSuccess = resolver.resolve(true, false).get(0); - } - - @Override - public Host get() { - return current != lastSuccess || iterations < 3 ? current : null; - } - - @Override - public void success() { - lastSuccess = current; - } - - @Override - public void fail() { - final List hosts = resolver.resolve(false, false); - final int index = hosts.indexOf(current) + 1; - final boolean inBound = index < hosts.size(); - current = hosts.get(inBound ? index : 0); - if (!inBound) { - iterations++; - } - } - -} diff --git a/src/main/java/com/arangodb/internal/net/RandomHostHandler.java b/src/main/java/com/arangodb/internal/net/RandomHostHandler.java deleted file mode 100644 index e4888d982..000000000 --- a/src/main/java/com/arangodb/internal/net/RandomHostHandler.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.ArrayList; -import java.util.Collections; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class RandomHostHandler implements HostHandler { - - private final HostResolver resolver; - private final HostHandler fallback; - private Host origin; - private Host current; - - public RandomHostHandler(final HostResolver resolver, final HostHandler fallback) { - super(); - this.resolver = resolver; - this.fallback = fallback; - origin = current = getRandomHost(true, false); - } - - @Override - public Host get() { - if (current == null) { - origin = current = getRandomHost(false, true); - } - return current; - } - - @Override - public void success() { - current = origin; - } - - @Override - public void fail() { - fallback.fail(); - current = fallback.get(); - } - - private Host getRandomHost(final boolean initial, final boolean closeConnections) { - final ArrayList hosts = new ArrayList(resolver.resolve(initial, closeConnections)); - Collections.shuffle(hosts); - return hosts.get(0); - } - -} diff --git a/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java b/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java deleted file mode 100644 index 4f7190573..000000000 --- a/src/main/java/com/arangodb/internal/net/RoundRobinHostHandler.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.List; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class RoundRobinHostHandler implements HostHandler { - - private final HostResolver resolver; - private Host current; - private int fails; - - public RoundRobinHostHandler(final HostResolver resolver) { - super(); - this.resolver = resolver; - current = resolver.resolve(true, false).get(0); - fails = 0; - } - - @Override - public Host get() { - final List hosts = resolver.resolve(false, false); - if (fails > hosts.size()) { - return null; - } - final int index = hosts.indexOf(current) + 1; - current = hosts.get(index < hosts.size() ? index : 0); - return current; - } - - @Override - public void success() { - fails = 0; - } - - @Override - public void fail() { - fails++; - } - -} diff --git a/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java b/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java deleted file mode 100644 index 2a2348c68..000000000 --- a/src/main/java/com/arangodb/internal/net/SimpleHostResolver.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.net; - -import java.util.List; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class SimpleHostResolver implements HostResolver { - - private final List hosts; - - public SimpleHostResolver(final List hosts) { - super(); - this.hosts = hosts; - } - - @Override - public void init(final EndpointResolver resolver) { - } - - @Override - public List resolve(final boolean initial, final boolean closeConnections) { - return hosts; - } - -} diff --git a/src/main/java/com/arangodb/internal/util/ArangoDeserializerImpl.java b/src/main/java/com/arangodb/internal/util/ArangoDeserializerImpl.java deleted file mode 100644 index 416dcbdb8..000000000 --- a/src/main/java/com/arangodb/internal/util/ArangoDeserializerImpl.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.lang.reflect.Type; - -import com.arangodb.ArangoDBException; -import com.arangodb.util.ArangoDeserializer; -import com.arangodb.velocypack.VPack; -import com.arangodb.velocypack.VPackParser; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -public class ArangoDeserializerImpl implements ArangoDeserializer { - - private final VPack vpacker; - private final VPackParser vpackParser; - - public ArangoDeserializerImpl(final VPack vpacker, final VPackParser vpackParser) { - super(); - this.vpacker = vpacker; - this.vpackParser = vpackParser; - } - - @Override - @SuppressWarnings("unchecked") - public T deserialize(final VPackSlice vpack, final Type type) throws ArangoDBException { - try { - final T doc; - if (type == String.class && !vpack.isString()) { - doc = (T) vpackParser.toJson(vpack, true); - } else { - doc = vpacker.deserialize(vpack, type); - } - return doc; - } catch (final VPackException e) { - throw new ArangoDBException(e); - } - } -} diff --git a/src/main/java/com/arangodb/internal/util/ArangoSerializerImpl.java b/src/main/java/com/arangodb/internal/util/ArangoSerializerImpl.java deleted file mode 100644 index f1e7b6d5f..000000000 --- a/src/main/java/com/arangodb/internal/util/ArangoSerializerImpl.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.lang.reflect.Type; -import java.util.Iterator; -import java.util.Map; - -import com.arangodb.ArangoDBException; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.velocypack.VPack; -import com.arangodb.velocypack.VPack.SerializeOptions; -import com.arangodb.velocypack.VPackParser; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -public class ArangoSerializerImpl implements ArangoSerializer { - - private final VPack vpacker; - private final VPack vpackerNull; - private final VPackParser vpackParser; - - public ArangoSerializerImpl(final VPack vpacker, final VPack vpackerNull, final VPackParser vpackParser) { - super(); - this.vpacker = vpacker; - this.vpackerNull = vpackerNull; - this.vpackParser = vpackParser; - } - - @Override - public VPackSlice serialize(final Object entity) throws ArangoDBException { - return serialize(entity, new ArangoSerializer.Options()); - } - - @SuppressWarnings("unchecked") - @Override - public VPackSlice serialize(final Object entity, final Options options) throws ArangoDBException { - if (options.getType() == null) { - options.type(entity.getClass()); - } - try { - final VPackSlice vpack; - final Class type = entity.getClass(); - final boolean serializeNullValues = options.isSerializeNullValues(); - if (String.class.isAssignableFrom(type)) { - vpack = vpackParser.fromJson((String) entity, 
serializeNullValues); - } else if (options.isStringAsJson() && Iterable.class.isAssignableFrom(type)) { - final Iterator iterator = Iterable.class.cast(entity).iterator(); - if (iterator.hasNext() && String.class.isAssignableFrom(iterator.next().getClass())) { - vpack = vpackParser.fromJson((Iterable) entity, serializeNullValues); - } else { - final VPack vp = serializeNullValues ? vpackerNull : vpacker; - vpack = vp.serialize(entity, - new SerializeOptions().type(options.getType()).additionalFields(options.getAdditionalFields())); - } - } else { - final VPack vp = serializeNullValues ? vpackerNull : vpacker; - vpack = vp.serialize(entity, - new SerializeOptions().type(options.getType()).additionalFields(options.getAdditionalFields())); - } - return vpack; - } catch (final VPackException e) { - throw new ArangoDBException(e); - } - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final boolean serializeNullValues) throws ArangoDBException { - return serialize(entity, new ArangoSerializer.Options().serializeNullValues(serializeNullValues)); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final boolean serializeNullValues, final boolean stringAsJson) - throws ArangoDBException { - return serialize(entity, - new ArangoSerializer.Options().serializeNullValues(serializeNullValues).stringAsJson(stringAsJson)); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final Type type) throws ArangoDBException { - return serialize(entity, new ArangoSerializer.Options().type(type)); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final Type type, final boolean serializeNullValues) - throws ArangoDBException { - return serialize(entity, new ArangoSerializer.Options().type(type).serializeNullValues(serializeNullValues)); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final Type type, final Map additionalFields) - 
throws ArangoDBException { - return serialize(entity, new ArangoSerializer.Options().type(type).additionalFields(additionalFields)); - } - -} diff --git a/src/main/java/com/arangodb/internal/util/ArangoUtilImpl.java b/src/main/java/com/arangodb/internal/util/ArangoUtilImpl.java deleted file mode 100644 index e96a5934b..000000000 --- a/src/main/java/com/arangodb/internal/util/ArangoUtilImpl.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.lang.reflect.Type; -import java.util.Map; - -import com.arangodb.ArangoDBException; -import com.arangodb.util.ArangoDeserializer; -import com.arangodb.util.ArangoSerializer; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public class ArangoUtilImpl implements ArangoSerialization { - - private final ArangoSerializer serializer; - private final ArangoDeserializer deserializer; - - public ArangoUtilImpl(final ArangoSerializer serializer, final ArangoDeserializer deserializer) { - super(); - this.serializer = serializer; - this.deserializer = deserializer; - } - - @Override - public VPackSlice serialize(final Object entity) throws ArangoDBException { - return serializer.serialize(entity); - } - - @Override - public VPackSlice serialize(final Object entity, final Options options) throws ArangoDBException { - return serializer.serialize(entity, options); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final boolean serializeNullValues) throws ArangoDBException { - return serializer.serialize(entity, serializeNullValues); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final boolean serializeNullValues, final boolean stringAsJson) - throws ArangoDBException { - return serializer.serialize(entity, serializeNullValues, stringAsJson); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final Type type) throws ArangoDBException { - return serializer.serialize(entity, type); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object entity, final Type type, final boolean serializeNullValues) - throws ArangoDBException { - return serializer.serialize(entity, type, serializeNullValues); - } - - @Override - @Deprecated - public VPackSlice serialize(final Object 
entity, final Type type, final Map additionalFields) - throws ArangoDBException { - return serializer.serialize(entity, type, additionalFields); - } - - @Override - public T deserialize(final VPackSlice vpack, final Type type) throws ArangoDBException { - return deserializer.deserialize(vpack, type); - } - -} diff --git a/src/main/java/com/arangodb/internal/util/CURLLogger.java b/src/main/java/com/arangodb/internal/util/CURLLogger.java deleted file mode 100644 index 5f60b857d..000000000 --- a/src/main/java/com/arangodb/internal/util/CURLLogger.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.util.Map.Entry; - -import org.apache.http.auth.Credentials; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; - -/** - * @author Mark Vollmary - * - */ -public class CURLLogger { - - private static Logger LOGGER = LoggerFactory.getLogger(CURLLogger.class); - - private CURLLogger() { - } - - public static void log( - final String url, - final Request request, - final Credentials credencials, - final ArangoSerialization util) { - final RequestType requestType = request.getRequestType(); - final boolean includeBody = (requestType == RequestType.POST || requestType == RequestType.PUT - || requestType == RequestType.PATCH || requestType == RequestType.DELETE) && request.getBody() != null; - final StringBuilder buffer = new StringBuilder(); - if (includeBody) { - buffer.append("\n"); - buffer.append("cat <<-___EOB___ | "); - } - buffer.append("curl -X ").append(requestType); - buffer.append(" --dump -"); - if (request.getHeaderParam().size() > 0) { - for (final Entry header : request.getHeaderParam().entrySet()) { - buffer.append(" -H '").append(header.getKey()).append(":").append(header.getValue()).append("'"); - } - } - if (credencials != null) { - buffer.append(" -u ").append(credencials.getUserPrincipal().getName()).append(":") - .append(credencials.getPassword()); - } - if (includeBody) { - buffer.append(" -d @-"); - } - buffer.append(" '").append(url).append("'"); - if (includeBody) { - buffer.append("\n"); - buffer.append((String) util.deserialize(request.getBody(), String.class)); - buffer.append("\n"); - buffer.append("___EOB___"); - } - LOGGER.debug("[CURL] {}", buffer); - } -} diff --git a/src/main/java/com/arangodb/internal/util/EncodeUtils.java 
b/src/main/java/com/arangodb/internal/util/EncodeUtils.java deleted file mode 100644 index 1bdd59018..000000000 --- a/src/main/java/com/arangodb/internal/util/EncodeUtils.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.io.UnsupportedEncodingException; -import java.net.URLEncoder; - -/** - * @author Mark Vollmary - * - */ -public class EncodeUtils { - - private EncodeUtils() { - } - - public static String encodeURL(final String value) throws UnsupportedEncodingException { - return URLEncoder.encode(value, "UTF-8").replaceAll("\\+", "%20").replaceAll("\\%21", "!") - .replaceAll("\\%27", "'").replaceAll("\\%28", "(").replaceAll("\\%29", ")").replaceAll("\\%7E", "~"); - } - -} diff --git a/src/main/java/com/arangodb/internal/util/HostUtils.java b/src/main/java/com/arangodb/internal/util/HostUtils.java deleted file mode 100644 index 0e5e53692..000000000 --- a/src/main/java/com/arangodb/internal/util/HostUtils.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import com.arangodb.internal.Host; - -/** - * @author Mark Vollmary - * - */ -public class HostUtils { - - private HostUtils() { - super(); - } - - public static Host createFromLocation(final String location) { - final Host host; - if (location != null) { - final String[] tmp = location.replaceAll(".*://", "").replaceAll("/.*", "").split(":"); - host = tmp.length == 2 ? new Host(tmp[0], Integer.valueOf(tmp[1])) : null; - } else { - host = null; - } - return host; - } - -} diff --git a/src/main/java/com/arangodb/internal/util/IOUtils.java b/src/main/java/com/arangodb/internal/util/IOUtils.java deleted file mode 100644 index 682812280..000000000 --- a/src/main/java/com/arangodb/internal/util/IOUtils.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import java.io.BufferedInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.UnsupportedEncodingException; - -/** - * @author Mark Vollmary - * - */ -public class IOUtils { - - private IOUtils() { - } - - public static String toString(final InputStream input) throws IOException { - return toString(input, "utf-8"); - } - - public static String toString(final InputStream input, final String encode) throws IOException { - try { - final StringBuilder buffer = new StringBuilder(8012); - final InputStreamReader in = new InputStreamReader(new BufferedInputStream(input), encode); - final char[] cbuf = new char[8012]; - int len; - while ((len = in.read(cbuf)) != -1) { - buffer.append(cbuf, 0, len); - } - return buffer.toString(); - } catch (final UnsupportedEncodingException e) { - throw new RuntimeException(e); - } finally { - if (input != null) { - try { - input.close(); - } catch (final IOException e) { - // TODO - } - } - } - } - - public static byte[] toByteArray(final InputStream input) throws IOException { - final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - int nRead; - final byte[] data = new byte[8012]; - while ((nRead = input.read(data, 0, data.length)) != -1) { - buffer.write(data, 0, nRead); - } - buffer.flush(); - return buffer.toByteArray(); - } -} diff --git a/src/main/java/com/arangodb/internal/util/ResponseUtils.java b/src/main/java/com/arangodb/internal/util/ResponseUtils.java deleted file mode 100644 index c3e4ecc9e..000000000 --- a/src/main/java/com/arangodb/internal/util/ResponseUtils.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the 
License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.util; - -import com.arangodb.ArangoDBException; -import com.arangodb.entity.ErrorEntity; -import com.arangodb.internal.net.ArangoDBRedirectException; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.exception.VPackParserException; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ResponseUtils { - - private static final int ERROR_STATUS = 300; - private static final int ERROR_INTERNAL = 503; - private static final String HEADER_ENDPOINT = "X-Arango-Endpoint"; - - private ResponseUtils() { - super(); - } - - public static void checkError(final ArangoSerialization util, final Response response) throws ArangoDBException { - try { - final int responseCode = response.getResponseCode(); - if (responseCode >= ERROR_STATUS) { - if (responseCode == ERROR_INTERNAL && response.getMeta().containsKey(HEADER_ENDPOINT)) { - throw new ArangoDBRedirectException(String.format("Response Code: %s", responseCode), - response.getMeta().get(HEADER_ENDPOINT)); - } else if (response.getBody() != null) { - final ErrorEntity errorEntity = util.deserialize(response.getBody(), ErrorEntity.class); - throw new ArangoDBException(errorEntity); - } else { - throw new ArangoDBException(String.format("Response Code: %s", responseCode), responseCode); - } - } - } catch (final VPackParserException e) { - throw new ArangoDBException(e); - } - } -} diff --git 
a/src/main/java/com/arangodb/internal/velocypack/VPackDeserializers.java b/src/main/java/com/arangodb/internal/velocypack/VPackDeserializers.java deleted file mode 100644 index 095d98ff8..000000000 --- a/src/main/java/com/arangodb/internal/velocypack/VPackDeserializers.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocypack; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.ArangoDBVersion.License; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionStatus; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.LogLevel; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryExecutionState; -import com.arangodb.velocypack.VPackDeserializationContext; -import com.arangodb.velocypack.VPackDeserializer; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class VPackDeserializers { - - private static 
final Logger LOGGER = LoggerFactory.getLogger(VPackDeserializers.class); - private static final String DATE_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"; - - public static final VPackDeserializer RESPONSE = new VPackDeserializer() { - @SuppressWarnings("unchecked") - @Override - public Response deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - final Response response = new Response(); - response.setVersion(vpack.get(0).getAsInt()); - response.setType(vpack.get(1).getAsInt()); - response.setResponseCode(vpack.get(2).getAsInt()); - if (vpack.size() > 3) { - response.setMeta(context.deserialize(vpack.get(3), Map.class)); - } - return response; - } - }; - - public static final VPackDeserializer COLLECTION_TYPE = new VPackDeserializer() { - @Override - public CollectionType deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return CollectionType.fromType(vpack.getAsInt()); - } - }; - - public static final VPackDeserializer COLLECTION_STATUS = new VPackDeserializer() { - @Override - public CollectionStatus deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return CollectionStatus.fromStatus(vpack.getAsInt()); - } - }; - - @SuppressWarnings("unchecked") - public static final VPackDeserializer BASE_DOCUMENT = new VPackDeserializer() { - @Override - public BaseDocument deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return new BaseDocument(context.deserialize(vpack, Map.class)); - } - }; - - @SuppressWarnings("unchecked") - public static final VPackDeserializer BASE_EDGE_DOCUMENT = new VPackDeserializer() { - @Override - public BaseEdgeDocument deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final 
VPackDeserializationContext context) throws VPackException { - return new BaseEdgeDocument(context.deserialize(vpack, Map.class)); - } - }; - - public static final VPackDeserializer DATE_STRING = new VPackDeserializer() { - @Override - public Date deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - try { - return new SimpleDateFormat(DATE_TIME_FORMAT).parse(vpack.getAsString()); - } catch (final ParseException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("got ParseException for date string: " + vpack.getAsString()); - } - } - return null; - } - }; - - public static final VPackDeserializer LOG_LEVEL = new VPackDeserializer() { - @Override - public LogLevel deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return LogLevel.fromLevel(vpack.getAsInt()); - } - }; - - public static final VPackDeserializer LICENSE = new VPackDeserializer() { - @Override - public License deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return License.valueOf(vpack.getAsString().toUpperCase()); - } - }; - - public static final VPackDeserializer PERMISSIONS = new VPackDeserializer() { - @Override - public Permissions deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return Permissions.valueOf(vpack.getAsString().toUpperCase()); - } - }; - - public static final VPackDeserializer QUERY_EXECUTION_STATE = new VPackDeserializer() { - @Override - public QueryExecutionState deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - return QueryExecutionState.valueOf(vpack.getAsString().toUpperCase().replaceAll(" ", "_")); - } - }; -} diff --git 
a/src/main/java/com/arangodb/internal/velocypack/VPackDocumentModule.java b/src/main/java/com/arangodb/internal/velocypack/VPackDocumentModule.java deleted file mode 100644 index 88a07a15b..000000000 --- a/src/main/java/com/arangodb/internal/velocypack/VPackDocumentModule.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocypack; - -import org.json.simple.JSONValue; - -import com.arangodb.internal.CollectionCache; -import com.arangodb.velocypack.VPackDeserializationContext; -import com.arangodb.velocypack.VPackDeserializer; -import com.arangodb.velocypack.VPackJsonDeserializer; -import com.arangodb.velocypack.VPackModule; -import com.arangodb.velocypack.VPackParserModule; -import com.arangodb.velocypack.VPackParserSetupContext; -import com.arangodb.velocypack.VPackSetupContext; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocypack.internal.util.NumberUtil; - -/** - * @author Mark Vollmary - * - */ -public class VPackDocumentModule implements VPackModule, VPackParserModule { - - private static final String ID = "_id"; - private final CollectionCache collectionCache; - - public VPackDocumentModule(final CollectionCache 
collectionCache) { - super(); - this.collectionCache = collectionCache; - } - - @Override - public > void setup(final C context) { - context.registerDeserializer(ID, String.class, new VPackDeserializer() { - @Override - public String deserialize( - final VPackSlice parent, - final VPackSlice vpack, - final VPackDeserializationContext context) throws VPackException { - final String id; - if (vpack.isCustom()) { - final long idLong = NumberUtil.toLong(vpack.getBuffer(), vpack.getStart() + 1, - vpack.getByteSize() - 1); - final String collectionName = collectionCache.getCollectionName(idLong); - if (collectionName != null) { - final VPackSlice key = parent.get("_key"); - id = String.format("%s/%s", collectionName, key.getAsString()); - } else { - id = null; - } - } else { - id = vpack.getAsString(); - } - return id; - } - }); - - } - - @Override - public > void setup(final C context) { - context.registerDeserializer(ID, ValueType.CUSTOM, new VPackJsonDeserializer() { - @Override - public void deserialize( - final VPackSlice parent, - final String attribute, - final VPackSlice vpack, - final StringBuilder json) throws VPackException { - final String id; - final long idLong = NumberUtil.toLong(vpack.getBuffer(), vpack.getStart() + 1, vpack.getByteSize() - 1); - final String collectionName = collectionCache.getCollectionName(idLong); - if (collectionName != null) { - final VPackSlice key = parent.get("_key"); - id = String.format("%s/%s", collectionName, key.getAsString()); - } else { - id = null; - } - json.append(JSONValue.toJSONString(id)); - } - }); - } - -} diff --git a/src/main/java/com/arangodb/internal/velocypack/VPackDriverModule.java b/src/main/java/com/arangodb/internal/velocypack/VPackDriverModule.java deleted file mode 100644 index 964aa73c6..000000000 --- a/src/main/java/com/arangodb/internal/velocypack/VPackDriverModule.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the 
Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocypack; - -import java.lang.reflect.Field; -import java.util.Date; - -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionStatus; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.LogLevel; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryExecutionState; -import com.arangodb.internal.velocystream.internal.AuthenticationRequest; -import com.arangodb.model.TraversalOptions; -import com.arangodb.velocypack.VPackFieldNamingStrategy; -import com.arangodb.velocypack.VPackModule; -import com.arangodb.velocypack.VPackParserModule; -import com.arangodb.velocypack.VPackParserSetupContext; -import com.arangodb.velocypack.VPackSetupContext; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class VPackDriverModule implements VPackModule, VPackParserModule { - - @Override - public > void setup(final C context) { - context.fieldNamingStrategy(new VPackFieldNamingStrategy() { - @Override - public String translateName(final Field field) { - final DocumentField annotation = field.getAnnotation(DocumentField.class); - if 
(annotation != null) { - return annotation.value().getSerializeName(); - } - return field.getName(); - } - }); - context.registerSerializer(Request.class, VPackSerializers.REQUEST); - context.registerSerializer(AuthenticationRequest.class, VPackSerializers.AUTH_REQUEST); - context.registerSerializer(CollectionType.class, VPackSerializers.COLLECTION_TYPE); - context.registerSerializer(BaseDocument.class, VPackSerializers.BASE_DOCUMENT); - context.registerSerializer(BaseEdgeDocument.class, VPackSerializers.BASE_EDGE_DOCUMENT); - context.registerSerializer(TraversalOptions.Order.class, VPackSerializers.TRAVERSAL_ORDER); - context.registerSerializer(LogLevel.class, VPackSerializers.LOG_LEVEL); - context.registerSerializer(Permissions.class, VPackSerializers.PERMISSIONS); - - context.registerDeserializer(Response.class, VPackDeserializers.RESPONSE); - context.registerDeserializer(CollectionType.class, VPackDeserializers.COLLECTION_TYPE); - context.registerDeserializer(CollectionStatus.class, VPackDeserializers.COLLECTION_STATUS); - context.registerDeserializer(BaseDocument.class, VPackDeserializers.BASE_DOCUMENT); - context.registerDeserializer(BaseEdgeDocument.class, VPackDeserializers.BASE_EDGE_DOCUMENT); - context.registerDeserializer(QueryEntity.PROPERTY_STARTED, Date.class, VPackDeserializers.DATE_STRING); - context.registerDeserializer(LogLevel.class, VPackDeserializers.LOG_LEVEL); - context.registerDeserializer(ArangoDBVersion.License.class, VPackDeserializers.LICENSE); - context.registerDeserializer(Permissions.class, VPackDeserializers.PERMISSIONS); - context.registerDeserializer(QueryExecutionState.class, VPackDeserializers.QUERY_EXECUTION_STATE); - } - - @Override - public > void setup(final C context) { - - } - -} diff --git a/src/main/java/com/arangodb/internal/velocypack/VPackSerializers.java b/src/main/java/com/arangodb/internal/velocypack/VPackSerializers.java deleted file mode 100644 index 93d7a1aa1..000000000 --- 
a/src/main/java/com/arangodb/internal/velocypack/VPackSerializers.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocypack; - -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.LogLevel; -import com.arangodb.entity.Permissions; -import com.arangodb.internal.velocystream.internal.AuthenticationRequest; -import com.arangodb.model.TraversalOptions; -import com.arangodb.model.TraversalOptions.Order; -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSerializationContext; -import com.arangodb.velocypack.VPackSerializer; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; - -/** - * @author Mark Vollmary - * - */ -public class VPackSerializers { - - public static final VPackSerializer REQUEST = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final Request value, - final VPackSerializationContext context) throws VPackException { - 
builder.add(attribute, ValueType.ARRAY); - builder.add(value.getVersion()); - builder.add(value.getType()); - builder.add(value.getDatabase()); - builder.add(value.getRequestType().getType()); - builder.add(value.getRequest()); - builder.add(ValueType.OBJECT); - for (final Entry entry : value.getQueryParam().entrySet()) { - builder.add(entry.getKey(), entry.getValue()); - } - builder.close(); - builder.add(ValueType.OBJECT); - for (final Entry entry : value.getHeaderParam().entrySet()) { - builder.add(entry.getKey(), entry.getValue()); - } - builder.close(); - builder.close(); - } - }; - - public static final VPackSerializer AUTH_REQUEST = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final AuthenticationRequest value, - final VPackSerializationContext context) throws VPackException { - builder.add(attribute, ValueType.ARRAY); - builder.add(value.getVersion()); - builder.add(value.getType()); - builder.add(value.getEncryption()); - builder.add(value.getUser()); - builder.add(value.getPassword()); - builder.close(); - } - }; - - public static final VPackSerializer COLLECTION_TYPE = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final CollectionType value, - final VPackSerializationContext context) throws VPackException { - builder.add(attribute, value.getType()); - } - }; - - public static final VPackSerializer BASE_DOCUMENT = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final BaseDocument value, - final VPackSerializationContext context) throws VPackException { - final Map doc = new HashMap(); - doc.putAll(value.getProperties()); - doc.put(DocumentField.Type.ID.getSerializeName(), value.getId()); - doc.put(DocumentField.Type.KEY.getSerializeName(), value.getKey()); - doc.put(DocumentField.Type.REV.getSerializeName(), value.getRevision()); - 
context.serialize(builder, attribute, doc); - } - }; - - public static final VPackSerializer BASE_EDGE_DOCUMENT = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final BaseEdgeDocument value, - final VPackSerializationContext context) throws VPackException { - final Map doc = new HashMap(); - doc.putAll(value.getProperties()); - doc.put(DocumentField.Type.ID.getSerializeName(), value.getId()); - doc.put(DocumentField.Type.KEY.getSerializeName(), value.getKey()); - doc.put(DocumentField.Type.REV.getSerializeName(), value.getRevision()); - doc.put(DocumentField.Type.FROM.getSerializeName(), value.getFrom()); - doc.put(DocumentField.Type.TO.getSerializeName(), value.getTo()); - context.serialize(builder, attribute, doc); - } - }; - - public static final VPackSerializer TRAVERSAL_ORDER = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final Order value, - final VPackSerializationContext context) throws VPackException { - if (TraversalOptions.Order.preorder_expander == value) { - builder.add(attribute, "preorder-expander"); - } else { - builder.add(attribute, value.name()); - } - } - }; - - public static final VPackSerializer LOG_LEVEL = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final LogLevel value, - final VPackSerializationContext context) throws VPackException { - builder.add(attribute, value.getLevel()); - } - }; - - public static final VPackSerializer PERMISSIONS = new VPackSerializer() { - @Override - public void serialize( - final VPackBuilder builder, - final String attribute, - final Permissions value, - final VPackSerializationContext context) throws VPackException { - builder.add(attribute, value.toString().toLowerCase()); - } - }; -} diff --git a/src/main/java/com/arangodb/internal/velocystream/VstCommunication.java 
b/src/main/java/com/arangodb/internal/velocystream/VstCommunication.java deleted file mode 100644 index dedbbf109..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/VstCommunication.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicLong; - -import javax.net.ssl.SSLContext; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.ArangoDBConstants; -import com.arangodb.internal.Host; -import com.arangodb.internal.net.ArangoDBRedirectException; -import com.arangodb.internal.net.ConnectionPool; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.internal.util.HostUtils; -import com.arangodb.internal.util.ResponseUtils; -import com.arangodb.internal.velocystream.internal.Chunk; -import com.arangodb.internal.velocystream.internal.Message; -import com.arangodb.internal.velocystream.internal.VstConnection; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackParserException; -import com.arangodb.velocystream.Request; -import 
com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public abstract class VstCommunication { - - private static final Logger LOGGER = LoggerFactory.getLogger(VstCommunication.class); - - protected static final AtomicLong mId = new AtomicLong(0L); - protected final ArangoSerialization util; - protected final ConnectionPool connectionPool; - - protected final String user; - protected final String password; - - protected final Integer chunksize; - - protected VstCommunication(final Integer timeout, final String user, final String password, final Boolean useSsl, - final SSLContext sslContext, final ArangoSerialization util, final Integer chunksize, - final ConnectionPool connectionPool) { - this.user = user; - this.password = password; - this.util = util; - this.connectionPool = connectionPool; - this.chunksize = chunksize != null ? chunksize : ArangoDBConstants.CHUNK_DEFAULT_CONTENT_SIZE; - } - - protected synchronized void connect(final C connection) { - if (!connection.isOpen()) { - try { - connection.open(); - if (user != null) { - authenticate(connection); - } - } catch (final IOException e) { - LOGGER.error(e.getMessage(), e); - throw new ArangoDBException(e); - } - } - } - - protected abstract void authenticate(final C connection); - - public void disconnect() throws IOException { - connectionPool.disconnect(); - } - - public R execute(final Request request, final HostHandle hostHandle) throws ArangoDBException { - final C connection = connectionPool.connection(hostHandle); - try { - return execute(request, connection); - } catch (final ArangoDBException e) { - if (e instanceof ArangoDBRedirectException) { - final String location = ArangoDBRedirectException.class.cast(e).getLocation(); - final Host host = HostUtils.createFromLocation(location); - connectionPool.closeConnectionOnError(connection); - return execute(request, new HostHandle().setHost(host)); - } else { - throw e; - } - } - } - - protected abstract R execute(final Request 
request, C connection) throws ArangoDBException; - - protected void checkError(final Response response) throws ArangoDBException { - ResponseUtils.checkError(util, response); - } - - protected Response createResponse(final Message message) throws VPackParserException { - final Response response = util.deserialize(message.getHead(), Response.class); - if (message.getBody() != null) { - response.setBody(message.getBody()); - } - return response; - } - - protected Message createMessage(final Request request) throws VPackParserException { - final long id = mId.incrementAndGet(); - return new Message(id, util.serialize(request), request.getBody()); - } - - protected Collection buildChunks(final Message message) { - final Collection chunks = new ArrayList(); - final VPackSlice head = message.getHead(); - int size = head.getByteSize(); - final VPackSlice body = message.getBody(); - if (body != null) { - size += body.getByteSize(); - } - final int n = size / chunksize; - final int numberOfChunks = (size % chunksize != 0) ? (n + 1) : n; - int off = 0; - for (int i = 0; size > 0; i++) { - final int len = Math.min(chunksize, size); - final long messageLength = (i == 0 && numberOfChunks > 1) ? size : -1L; - final Chunk chunk = new Chunk(message.getId(), i, numberOfChunks, messageLength, off, len); - size -= len; - off += len; - chunks.add(chunk); - } - return chunks; - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/VstCommunicationSync.java b/src/main/java/com/arangodb/internal/velocystream/VstCommunicationSync.java deleted file mode 100644 index e90c5a6e6..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/VstCommunicationSync.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream; - -import javax.net.ssl.SSLContext; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.ArangoDBConstants; -import com.arangodb.internal.CollectionCache; -import com.arangodb.internal.Host; -import com.arangodb.internal.net.ConnectionPool; -import com.arangodb.internal.net.DelHostHandler; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.internal.velocystream.internal.AuthenticationRequest; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.internal.velocystream.internal.Message; -import com.arangodb.internal.velocystream.internal.MessageStore; -import com.arangodb.util.ArangoSerialization; -import com.arangodb.velocypack.exception.VPackParserException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class VstCommunicationSync extends VstCommunication { - - private static final Logger LOGGER = LoggerFactory.getLogger(VstCommunicationSync.class); - private final CollectionCache collectionCache; - - public static class Builder { - - private final HostHandler hostHandler; - private Integer timeout; - private String user; - private String password; - private Boolean useSsl; - private SSLContext sslContext; - private Integer chunksize; - private Integer maxConnections; - - public Builder(final HostHandler hostHandler) { - 
super(); - this.hostHandler = hostHandler; - } - - public Builder(final Builder builder) { - this(builder.hostHandler); - timeout(builder.timeout).user(builder.user).password(builder.password).useSsl(builder.useSsl) - .sslContext(builder.sslContext).chunksize(builder.chunksize).maxConnections(builder.maxConnections); - } - - public Builder timeout(final Integer timeout) { - this.timeout = timeout; - return this; - } - - public Builder user(final String user) { - this.user = user; - return this; - } - - public Builder password(final String password) { - this.password = password; - return this; - } - - public Builder useSsl(final Boolean useSsl) { - this.useSsl = useSsl; - return this; - } - - public Builder sslContext(final SSLContext sslContext) { - this.sslContext = sslContext; - return this; - } - - public Builder chunksize(final Integer chunksize) { - this.chunksize = chunksize; - return this; - } - - public Builder maxConnections(final Integer maxConnections) { - this.maxConnections = maxConnections; - return this; - } - - public VstCommunication build( - final ArangoSerialization util, - final CollectionCache collectionCache) { - return new VstCommunicationSync(hostHandler, timeout, user, password, useSsl, sslContext, util, - collectionCache, chunksize, maxConnections); - } - - } - - protected VstCommunicationSync(final HostHandler hostHandler, final Integer timeout, final String user, - final String password, final Boolean useSsl, final SSLContext sslContext, final ArangoSerialization util, - final CollectionCache collectionCache, final Integer chunksize, final Integer maxConnections) { - super(timeout, user, password, useSsl, sslContext, util, chunksize, new ConnectionPool( - maxConnections != null ? 
Math.max(1, maxConnections) : ArangoDBConstants.MAX_CONNECTIONS_VST_DEFAULT) { - private final ConnectionSync.Builder builder = new ConnectionSync.Builder(new MessageStore()) - .timeout(timeout).useSsl(useSsl).sslContext(sslContext); - - @Override - public ConnectionSync createConnection(final Host host) { - return builder.hostHandler(new DelHostHandler(hostHandler, host)).build(); - } - }); - this.collectionCache = collectionCache; - } - - @Override - protected Response execute(final Request request, final ConnectionSync connection) throws ArangoDBException { - connect(connection); - try { - final Message requestMessage = createMessage(request); - final Message responseMessage = send(requestMessage, connection); - collectionCache.setDb(request.getDatabase()); - final Response response = createResponse(responseMessage); - checkError(response); - return response; - } catch (final VPackParserException e) { - throw new ArangoDBException(e); - } - } - - private Message send(final Message message, final ConnectionSync connection) throws ArangoDBException { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Send Message (id=%s, head=%s, body=%s)", message.getId(), message.getHead(), - message.getBody() != null ? message.getBody() : "{}")); - } - return connection.write(message, buildChunks(message)); - } - - @Override - protected void authenticate(final ConnectionSync connection) { - final Response response = execute( - new AuthenticationRequest(user, password != null ? 
password : "", ArangoDBConstants.ENCRYPTION_PLAIN), - connection); - checkError(response); - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/VstProtocol.java b/src/main/java/com/arangodb/internal/velocystream/VstProtocol.java deleted file mode 100644 index 02d9e42fc..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/VstProtocol.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream; - -import java.io.IOException; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.net.CommunicationProtocol; -import com.arangodb.internal.net.HostHandle; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class VstProtocol implements CommunicationProtocol { - - private final VstCommunication communication; - - public VstProtocol(final VstCommunication communication) { - super(); - this.communication = communication; - } - - @Override - public Response execute(final Request request, final HostHandle hostHandle) throws ArangoDBException { - return communication.execute(request, hostHandle); - } - - @Override - public void close() throws IOException { - communication.disconnect(); - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/AuthenticationRequest.java b/src/main/java/com/arangodb/internal/velocystream/internal/AuthenticationRequest.java deleted file mode 100644 index 536c85626..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/AuthenticationRequest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import com.arangodb.velocystream.Request; - -/** - * @author Mark Vollmary - * - */ -public class AuthenticationRequest extends Request { - - private final String user; - private final String password; - private final String encryption;// "plain" - - public AuthenticationRequest(final String user, final String password, final String encryption) { - super(null, null, null); - this.user = user; - this.password = password; - this.encryption = encryption; - setType(1000); - } - - public String getUser() { - return user; - } - - public String getPassword() { - return password; - } - - public String getEncryption() { - return encryption; - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/Chunk.java b/src/main/java/com/arangodb/internal/velocystream/internal/Chunk.java deleted file mode 100644 index 814362893..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/Chunk.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -/** - * @author Mark Vollmary - * - */ -public class Chunk { - - private final long messageId; - private final long messageLength; - private final int chunkX; - private final int contentOffset; - private final int contentLength; - - public Chunk(final long messageId, final int chunkX, final long messageLength, final int contentOffset, - final int contentLength) { - this.messageId = messageId; - this.chunkX = chunkX; - this.messageLength = messageLength; - this.contentOffset = contentOffset; - this.contentLength = contentLength; - } - - public Chunk(final long messageId, final int chunkIndex, final int numberOfChunks, final long messageLength, - final int contentOffset, final int contentLength) { - this(messageId, chunkX(chunkIndex, numberOfChunks), messageLength, contentOffset, contentLength); - } - - private static int chunkX(final int chunkIndex, final int numberOfChunks) { - int chunkX; - if (numberOfChunks == 1) { - chunkX = 3;// last byte: 0000 0011 - } else if (chunkIndex == 0) { - chunkX = (numberOfChunks << 1) + 1; - } else { - chunkX = chunkIndex << 1; - } - return chunkX; - } - - public long getMessageId() { - return messageId; - } - - public long getMessageLength() { - return messageLength; - } - - public boolean isFirstChunk() { - return 1 == (chunkX & 0x1); - } - - public int getChunk() { - return chunkX >> 1; - } - - public int getChunkX() { - return chunkX; - } - - public int getContentOffset() { - return contentOffset; - } - - public int getContentLength() { - return contentLength; - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/ChunkStore.java b/src/main/java/com/arangodb/internal/velocystream/internal/ChunkStore.java deleted file mode 100644 index 1504203d3..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/ChunkStore.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * 
DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; - -/** - * @author Mark Vollmary - * - */ -public class ChunkStore { - - private final MessageStore messageStore; - private final Map data; - - public ChunkStore(final MessageStore messageStore) { - super(); - this.messageStore = messageStore; - data = new HashMap(); - } - - public ByteBuffer storeChunk(final Chunk chunk) throws BufferUnderflowException, IndexOutOfBoundsException { - final long messageId = chunk.getMessageId(); - ByteBuffer chunkBuffer = data.get(messageId); - if (chunkBuffer == null) { - if (!chunk.isFirstChunk()) { - messageStore.cancel(messageId); - return null; - } - final int length = (int) (chunk.getMessageLength() > 0 ? 
chunk.getMessageLength() - : chunk.getContentLength()); - chunkBuffer = ByteBuffer.allocate(length); - data.put(messageId, chunkBuffer); - } - return chunkBuffer; - } - - public void checkCompleteness(final long messageId) { - checkCompleteness(messageId, data.get(messageId)); - } - - private void checkCompleteness(final long messageId, final ByteBuffer chunkBuffer) - throws BufferUnderflowException, IndexOutOfBoundsException { - if (chunkBuffer.position() == chunkBuffer.limit()) { - messageStore.consume(new Message(messageId, chunkBuffer.array())); - data.remove(messageId); - } - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/ConnectionSync.java b/src/main/java/com/arangodb/internal/velocystream/internal/ConnectionSync.java deleted file mode 100644 index f1ecefb8d..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/ConnectionSync.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import java.util.Collection; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; - -import javax.net.ssl.SSLContext; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.net.HostHandler; - -/** - * @author Mark Vollmary - * - */ -public class ConnectionSync extends VstConnection { - - public static class Builder { - - private final MessageStore messageStore; - private HostHandler hostHandler; - private Integer timeout; - private Boolean useSsl; - private SSLContext sslContext; - - public Builder(final MessageStore messageStore) { - super(); - this.messageStore = messageStore; - } - - public Builder hostHandler(final HostHandler hostHandler) { - this.hostHandler = hostHandler; - return this; - } - - public Builder timeout(final Integer timeout) { - this.timeout = timeout; - return this; - } - - public Builder useSsl(final Boolean useSsl) { - this.useSsl = useSsl; - return this; - } - - public Builder sslContext(final SSLContext sslContext) { - this.sslContext = sslContext; - return this; - } - - public ConnectionSync build() { - return new ConnectionSync(hostHandler, timeout, useSsl, sslContext, messageStore); - } - } - - private ConnectionSync(final HostHandler hostHandler, final Integer timeout, final Boolean useSsl, - final SSLContext sslContext, final MessageStore messageStore) { - super(hostHandler, timeout, useSsl, sslContext, messageStore); - } - - public Message write(final Message message, final Collection chunks) throws ArangoDBException { - final FutureTask task = new FutureTask(new Callable() { - @Override - public Message call() throws Exception { - return messageStore.get(message.getId()); - } - }); - messageStore.storeMessage(message.getId(), task); - super.writeIntern(message, chunks); - try { - return task.get(); - } catch (final 
InterruptedException e) { - throw new ArangoDBException(e); - } catch (final ExecutionException e) { - throw new ArangoDBException(e); - } - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/Message.java b/src/main/java/com/arangodb/internal/velocystream/internal/Message.java deleted file mode 100644 index d0a5cac99..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/Message.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import java.nio.BufferUnderflowException; - -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public class Message { - - private final long id; - private final VPackSlice head; - private final VPackSlice body; - - public Message(final long id, final byte[] chunkBuffer) throws BufferUnderflowException, IndexOutOfBoundsException { - super(); - this.id = id; - head = new VPackSlice(chunkBuffer); - final int headSize = head.getByteSize(); - if (chunkBuffer.length > headSize) { - body = new VPackSlice(chunkBuffer, headSize); - } else { - body = null; - } - } - - public Message(final long id, final VPackSlice head, final VPackSlice body) { - super(); - this.id = id; - this.head = head; - this.body = body; - } - - public long getId() { - return id; - } - - public VPackSlice getHead() { - return head; - } - - public VPackSlice getBody() { - return body; - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/MessageStore.java b/src/main/java/com/arangodb/internal/velocystream/internal/MessageStore.java deleted file mode 100644 index 277562ac5..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/MessageStore.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.FutureTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; - -/** - * @author Mark Vollmary - * - */ -public class MessageStore { - - private static final Logger LOGGER = LoggerFactory.getLogger(MessageStore.class); - - private final Map> task; - private final Map response; - private final Map error; - - public MessageStore() { - super(); - task = new ConcurrentHashMap>(); - response = new ConcurrentHashMap(); - error = new ConcurrentHashMap(); - } - - public void storeMessage(final long messageId, final FutureTask future) { - task.put(messageId, future); - } - - public void consume(final Message message) { - final FutureTask future = task.remove(message.getId()); - if (future != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Received Message (id=%s, head=%s, body=%s)", message.getId(), - message.getHead(), message.getBody() != null ? 
message.getBody() : "{}")); - } - response.put(message.getId(), message); - future.run(); - } - } - - public Message get(final long messageId) throws ArangoDBException { - final Message result = response.remove(messageId); - if (result == null) { - final Exception e = error.remove(messageId); - if (e != null) { - throw new ArangoDBException(e); - } - } - return result; - } - - public void cancel(final long messageId) { - final FutureTask future = task.remove(messageId); - if (future != null) { - LOGGER.error(String.format("Cancel Message unexpected (id=%s).", messageId)); - future.cancel(true); - } - } - - public void clear(final Exception e) { - if (!task.isEmpty()) { - LOGGER.error(e.getMessage(), e); - } - for (final Entry> entry : task.entrySet()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Exceptionally complete Message (id=%s).", entry.getKey())); - } - error.put(entry.getKey(), e); - entry.getValue().run(); - } - task.clear(); - } - - public void clear() { - for (final Entry> entry : task.entrySet()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Cancel Message (id=%s).", entry.getKey())); - } - entry.getValue().cancel(true); - } - task.clear(); - } - -} diff --git a/src/main/java/com/arangodb/internal/velocystream/internal/VstConnection.java b/src/main/java/com/arangodb/internal/velocystream/internal/VstConnection.java deleted file mode 100644 index d1b3d4fc5..000000000 --- a/src/main/java/com/arangodb/internal/velocystream/internal/VstConnection.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream.internal; - -import java.io.BufferedOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.util.Collection; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -import javax.net.SocketFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.SSLSocketFactory; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.arangodb.ArangoDBException; -import com.arangodb.internal.ArangoDBConstants; -import com.arangodb.internal.Host; -import com.arangodb.internal.net.Connection; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public abstract class VstConnection implements Connection { - - private static final Logger LOGGER = LoggerFactory.getLogger(VstConnection.class); - private static final byte[] PROTOCOL_HEADER = "VST/1.0\r\n\r\n".getBytes(); - - private ExecutorService executor; - protected final MessageStore messageStore; - - private final HostHandler hostHandler; - private final Integer timeout; - private final Boolean useSsl; - private final SSLContext sslContext; - - private Socket socket; - private OutputStream outputStream; - private InputStream 
inputStream; - - private Host host; - - protected VstConnection(final HostHandler hostHandler, final Integer timeout, final Boolean useSsl, - final SSLContext sslContext, final MessageStore messageStore) { - super(); - this.hostHandler = hostHandler; - this.timeout = timeout; - this.useSsl = useSsl; - this.sslContext = sslContext; - this.messageStore = messageStore; - } - - @Override - public Host getHost() { - return host; - } - - public boolean isOpen() { - return socket != null && socket.isConnected() && !socket.isClosed(); - } - - public synchronized void open() throws IOException { - if (isOpen()) { - return; - } - host = hostHandler.get(); - while (true) { - if (host == null) { - throw new ArangoDBException("Was not able to connect to any host"); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Open connection to %s", host)); - } - try { - if (useSsl != null && useSsl) { - if (sslContext != null) { - socket = sslContext.getSocketFactory().createSocket(); - } else { - socket = SSLSocketFactory.getDefault().createSocket(); - } - } else { - socket = SocketFactory.getDefault().createSocket(); - } - socket.connect(new InetSocketAddress(host.getHost(), host.getPort()), - timeout != null ? timeout : ArangoDBConstants.DEFAULT_TIMEOUT); - socket.setKeepAlive(true); - socket.setTcpNoDelay(true); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Connected to %s", socket)); - } - - outputStream = new BufferedOutputStream(socket.getOutputStream()); - inputStream = socket.getInputStream(); - - if (useSsl != null && useSsl) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Start Handshake on %s", socket)); - } - ((SSLSocket) socket).startHandshake(); - } - hostHandler.success(); - break; - } catch (final IOException e) { - hostHandler.fail(); - final Host failedHost = host; - host = hostHandler.get(); - if (host != null) { - LOGGER.warn(String.format("Could not connect to %s or SSL Handshake failed. 
Try connecting to %s", - failedHost, host)); - } else { - throw e; - } - } - } - sendProtocolHeader(); - executor = Executors.newSingleThreadExecutor(); - executor.submit(new Callable() { - @Override - public Void call() throws Exception { - final ChunkStore chunkStore = new ChunkStore(messageStore); - while (true) { - if (!isOpen()) { - messageStore.clear(new IOException("The socket is closed.")); - close(); - break; - } - try { - final Chunk chunk = readChunk(); - final ByteBuffer chunkBuffer = chunkStore.storeChunk(chunk); - if (chunkBuffer != null) { - final byte[] buf = new byte[chunk.getContentLength()]; - readBytesIntoBuffer(buf, 0, buf.length); - chunkBuffer.put(buf); - chunkStore.checkCompleteness(chunk.getMessageId()); - } - } catch (final Exception e) { - messageStore.clear(e); - close(); - break; - } - } - return null; - } - }); - } - - @Override - public synchronized void close() { - messageStore.clear(); - if (executor != null && !executor.isShutdown()) { - executor.shutdown(); - } - if (socket != null && !socket.isClosed()) { - try { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Close connection %s", socket)); - } - socket.close(); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - } - - @Override - public synchronized void closeOnError() { - hostHandler.fail(); - close(); - } - - private synchronized void sendProtocolHeader() throws IOException { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Send velocystream protocol header to %s", socket)); - } - outputStream.write(PROTOCOL_HEADER); - outputStream.flush(); - } - - protected synchronized void writeIntern(final Message message, final Collection chunks) - throws ArangoDBException { - for (final Chunk chunk : chunks) { - try { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Send chunk %s:%s from message %s", chunk.getChunk(), - chunk.isFirstChunk() ? 
1 : 0, chunk.getMessageId())); - } - writeChunkHead(chunk); - final int contentOffset = chunk.getContentOffset(); - final int contentLength = chunk.getContentLength(); - final VPackSlice head = message.getHead(); - final int headLength = head.getByteSize(); - int written = 0; - if (contentOffset < headLength) { - written = Math.min(contentLength, headLength - contentOffset); - outputStream.write(head.getBuffer(), contentOffset, written); - } - if (written < contentLength) { - final VPackSlice body = message.getBody(); - outputStream.write(body.getBuffer(), contentOffset + written - headLength, contentLength - written); - } - outputStream.flush(); - } catch (final IOException e) { - throw new ArangoDBException(e); - } - } - } - - private synchronized void writeChunkHead(final Chunk chunk) throws IOException { - final long messageLength = chunk.getMessageLength(); - final int headLength = messageLength > -1L ? ArangoDBConstants.CHUNK_MAX_HEADER_SIZE - : ArangoDBConstants.CHUNK_MIN_HEADER_SIZE; - final int length = chunk.getContentLength() + headLength; - final ByteBuffer buffer = ByteBuffer.allocate(headLength).order(ByteOrder.LITTLE_ENDIAN); - buffer.putInt(length); - buffer.putInt(chunk.getChunkX()); - buffer.putLong(chunk.getMessageId()); - if (messageLength > -1L) { - buffer.putLong(messageLength); - } - outputStream.write(buffer.array()); - } - - protected Chunk readChunk() throws IOException { - final ByteBuffer chunkHeadBuffer = readBytes(ArangoDBConstants.CHUNK_MIN_HEADER_SIZE); - final int length = chunkHeadBuffer.getInt(); - final int chunkX = chunkHeadBuffer.getInt(); - final long messageId = chunkHeadBuffer.getLong(); - final long messageLength; - final int contentLength; - if ((1 == (chunkX & 0x1)) && ((chunkX >> 1) > 1)) { - messageLength = readBytes(ArangoDBConstants.LONG_BYTES).getLong(); - contentLength = length - ArangoDBConstants.CHUNK_MAX_HEADER_SIZE; - } else { - messageLength = -1L; - contentLength = length - 
ArangoDBConstants.CHUNK_MIN_HEADER_SIZE; - } - final Chunk chunk = new Chunk(messageId, chunkX, messageLength, 0, contentLength); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Received chunk %s:%s from message %s", chunk.getChunk(), - chunk.isFirstChunk() ? 1 : 0, chunk.getMessageId())); - } - return chunk; - } - - private ByteBuffer readBytes(final int len) throws IOException { - final byte[] buf = new byte[len]; - readBytesIntoBuffer(buf, 0, len); - return ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); - } - - protected void readBytesIntoBuffer(final byte[] buf, final int off, final int len) throws IOException { - for (int readed = 0; readed < len;) { - final int read = inputStream.read(buf, off + readed, len - readed); - if (read == -1) { - throw new IOException("Reached the end of the stream."); - } else { - readed += read; - } - } - } - -} diff --git a/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java b/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java deleted file mode 100644 index 12088c9ea..000000000 --- a/src/main/java/com/arangodb/model/AqlFunctionCreateOptions.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class AqlFunctionCreateOptions { - - private String name; - private String code; - private Boolean isDeterministic; - - public AqlFunctionCreateOptions() { - super(); - } - - /** - * @param name - * the fully qualified name of the user functions - * @return options - */ - protected AqlFunctionCreateOptions name(final String name) { - this.name = name; - return this; - } - - protected String getName() { - return name; - } - - /** - * @param code - * a string representation of the function body - * @return options - */ - protected AqlFunctionCreateOptions code(final String code) { - this.code = code; - return this; - } - - protected String getCode() { - return code; - } - - /** - * @param isDeterministic - * an optional boolean value to indicate that the function results are fully deterministic (function - * return value solely depends on the input value and return value is the same for repeated calls with - * same input) - * @return options - */ - public AqlFunctionCreateOptions isDeterministic(final Boolean isDeterministic) { - this.isDeterministic = isDeterministic; - return this; - } - - public Boolean getIsDeterministic() { - return isDeterministic; - } - -} diff --git a/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java b/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java deleted file mode 100644 index 8d428b96a..000000000 --- a/src/main/java/com/arangodb/model/AqlFunctionDeleteOptions.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class AqlFunctionDeleteOptions { - - private Boolean group; - - public AqlFunctionDeleteOptions() { - super(); - } - - public Boolean getGroup() { - return group; - } - - /** - * @param group - * If set to true, then the function name provided in name is treated as a namespace prefix, and all - * functions in the specified namespace will be deleted. If set to false, the function name provided in - * name must be fully qualified, including any namespaces. - * @return options - */ - public AqlFunctionDeleteOptions group(final Boolean group) { - this.group = group; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java b/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java deleted file mode 100644 index a7c91edb3..000000000 --- a/src/main/java/com/arangodb/model/AqlFunctionGetOptions.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class AqlFunctionGetOptions { - - private String namespace; - - public AqlFunctionGetOptions() { - super(); - } - - public String getNamespace() { - return namespace; - } - - /** - * @param namespace - * Returns all registered AQL user functions from namespace namespace - * @return options - */ - public AqlFunctionGetOptions namespace(final String namespace) { - this.namespace = namespace; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java b/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java deleted file mode 100644 index 506833be4..000000000 --- a/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Collection; -import java.util.Map; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class AqlQueryExplainOptions { - - private Map bindVars; - private String query; - private Options options; - - public AqlQueryExplainOptions() { - super(); - } - - protected Map getBindVars() { - return bindVars; - } - - /** - * @param bindVars - * key/value pairs representing the bind parameters - * @return options - */ - protected AqlQueryExplainOptions bindVars(final Map bindVars) { - this.bindVars = bindVars; - return this; - } - - protected String getQuery() { - return query; - } - - /** - * @param query - * the query which you want explained - * @return options - */ - protected AqlQueryExplainOptions query(final String query) { - this.query = query; - return this; - } - - public Integer getMaxNumberOfPlans() { - return getOptions().maxNumberOfPlans; - } - - /** - * @param maxNumberOfPlans - * an optional maximum number of plans that the optimizer is allowed to generate. Setting this attribute - * to a low value allows to put a cap on the amount of work the optimizer does. - * @return options - */ - public AqlQueryExplainOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { - getOptions().maxNumberOfPlans = maxNumberOfPlans; - return this; - } - - public Boolean getAllPlans() { - return getOptions().allPlans; - } - - /** - * @param allPlans - * if set to true, all possible execution plans will be returned. The default is false, meaning only the - * optimal plan will be returned. 
- * @return options - */ - public AqlQueryExplainOptions allPlans(final Boolean allPlans) { - getOptions().allPlans = allPlans; - return this; - } - - public Collection getRules() { - return getOptions().getOptimizer().rules; - } - - /** - * @param rules - * an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling - * the optimizer to include or exclude specific rules. - * @return options - */ - public AqlQueryExplainOptions rules(final Collection rules) { - getOptions().getOptimizer().rules = rules; - return this; - } - - private Options getOptions() { - if (options == null) { - options = new Options(); - } - return options; - } - - private static class Options { - private Optimizer optimizer; - private Integer maxNumberOfPlans; - private Boolean allPlans; - - protected Optimizer getOptimizer() { - if (optimizer == null) { - optimizer = new Optimizer(); - } - return optimizer; - } - } - - private static class Optimizer { - private Collection rules; - } -} diff --git a/src/main/java/com/arangodb/model/AqlQueryOptions.java b/src/main/java/com/arangodb/model/AqlQueryOptions.java deleted file mode 100644 index a8de15c2d..000000000 --- a/src/main/java/com/arangodb/model/AqlQueryOptions.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Collection; -import java.util.Map; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class AqlQueryOptions { - - private Boolean count; - private Integer ttl; - private Integer batchSize; - private Boolean cache; - private Long memoryLimit; - private Map bindVars; - private String query; - private Options options; - - public AqlQueryOptions() { - super(); - } - - public Boolean getCount() { - return count; - } - - /** - * @param count - * indicates whether the number of documents in the result set should be returned in the "count" - * attribute of the result. Calculating the "count" attribute might have a performance impact for some - * queries in the future so this option is turned off by default, and "count" is only returned when - * requested. - * @return options - */ - public AqlQueryOptions count(final Boolean count) { - this.count = count; - return this; - } - - public Integer getTtl() { - return ttl; - } - - /** - * @param ttl - * The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically - * after the specified amount of time. This is useful to ensure garbage collection of cursors that are - * not fully fetched by clients. If not set, a server-defined value will be used. - * @return options - */ - public AqlQueryOptions ttl(final Integer ttl) { - this.ttl = ttl; - return this; - } - - public Integer getBatchSize() { - return batchSize; - } - - /** - * @param batchSize - * maximum number of result documents to be transferred from the server to the client in one roundtrip. - * If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 - * is disallowed. 
- * @return options - */ - public AqlQueryOptions batchSize(final Integer batchSize) { - this.batchSize = batchSize; - return this; - } - - public Long getMemoryLimit() { - return memoryLimit; - } - - /** - * @param memoryLimit - * the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the - * query will fail with error "resource limit exceeded" in case it allocates too much memory. A value of - * 0 indicates that there is no memory limit. - * @since ArangoDB 3.1.0 - * @return options - */ - public AqlQueryOptions memoryLimit(final Long memoryLimit) { - this.memoryLimit = memoryLimit; - return this; - } - - public Boolean getCache() { - return cache; - } - - /** - * @param cache - * flag to determine whether the AQL query cache shall be used. If set to false, then any query cache - * lookup will be skipped for the query. If set to true, it will lead to the query cache being checked - * for the query if the query cache mode is either on or demand. - * @return options - */ - public AqlQueryOptions cache(final Boolean cache) { - this.cache = cache; - return this; - } - - protected Map getBindVars() { - return bindVars; - } - - /** - * @param bindVars - * key/value pairs representing the bind parameters - * @return options - */ - protected AqlQueryOptions bindVars(final Map bindVars) { - this.bindVars = bindVars; - return this; - } - - protected String getQuery() { - return query; - } - - /** - * @param query - * the query which you want parse - * @return options - */ - protected AqlQueryOptions query(final String query) { - this.query = query; - return this; - } - - public Boolean getFailOnWarning() { - return options != null ? options.failOnWarning : null; - } - - /** - * @param failOnWarning - * When set to true, the query will throw an exception and abort instead of producing a warning. This - * option should be used during development to catch potential issues early. 
When the attribute is set to - * false, warnings will not be propagated to exceptions and will be returned with the query result. There - * is also a server configuration option --query.fail-on-warning for setting the default value for - * failOnWarning so it does not need to be set on a per-query level. - * @return options - */ - public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { - getOptions().failOnWarning = failOnWarning; - return this; - } - - /** - * @return If set to true, then the additional query profiling information will be returned in the sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. - */ - public Boolean getProfile() { - return options != null ? options.profile : null; - } - - /** - * @param profile - * If set to true, then the additional query profiling information will be returned in the sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. - * @return options - */ - public AqlQueryOptions profile(final Boolean profile) { - getOptions().profile = profile; - return this; - } - - public Long getMaxTransactionSize() { - return options != null ? options.maxTransactionSize : null; - } - - /** - * @param maxTransactionSize - * Transaction size limit in bytes. Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { - getOptions().maxTransactionSize = maxTransactionSize; - return this; - } - - public Long getMaxWarningCount() { - return options != null ? options.maxWarningCount : null; - } - - /** - * @param maxWarningCount - * Limits the maximum number of warnings a query will return. The number of warnings a query will return - * is limited to 10 by default, but that number can be increased or decreased by setting this attribute. 
- * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions maxWarningCount(final Long maxWarningCount) { - getOptions().maxWarningCount = maxWarningCount; - return this; - } - - public Long getIntermediateCommitCount() { - return options != null ? options.intermediateCommitCount : null; - } - - /** - * @param intermediateCommitCount - * Maximum number of operations after which an intermediate commit is performed automatically. Honored by - * the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { - getOptions().intermediateCommitCount = intermediateCommitCount; - return this; - } - - public Long getIntermediateCommitSize() { - return options != null ? options.intermediateCommitSize : null; - } - - /** - * @param intermediateCommitSize - * Maximum total size of operations after which an intermediate commit is performed automatically. - * Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { - getOptions().intermediateCommitSize = intermediateCommitSize; - return this; - } - - public Double getSatelliteSyncWait() { - return options != null ? options.satelliteSyncWait : null; - } - - /** - * @param satelliteSyncWait - * This enterprise parameter allows to configure how long a DBServer will have time to bring the - * satellite collections involved in the query into sync. The default value is 60.0 (seconds). When the - * max time has been reached the query will be stopped. - * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { - getOptions().satelliteSyncWait = satelliteSyncWait; - return this; - } - - public Boolean getSkipInaccessibleCollections() { - return options != null ? 
options.skipInaccessibleCollections : null; - } - - /** - * @param skipInaccessibleCollections - * AQL queries (especially graph traversals) will treat collection to which a user has no access rights - * as if these collections were empty. Instead of returning a forbidden access error, your queries will - * execute normally. This is intended to help with certain use-cases: A graph contains several - * collections and different users execute AQL queries on that graph. You can now naturally limit the - * accessible results by changing the access rights of users on collections. This feature is only - * available in the Enterprise Edition. - * @since ArangoDB 3.2.0 - * @return options - */ - public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { - getOptions().skipInaccessibleCollections = skipInaccessibleCollections; - return this; - } - - public Boolean getFullCount() { - return options != null ? options.fullCount : null; - } - - /** - * @param fullCount - * if set to true and the query contains a LIMIT clause, then the result will have an extra attribute - * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The - * fullCount attribute will contain the number of documents in the result before the last LIMIT in the - * query was applied. It can be used to count the number of documents that match certain filter criteria, - * but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. - * Note that setting the option will disable a few LIMIT optimizations and may lead to more documents - * being processed, and thus make queries run longer. Note that the fullCount attribute will only be - * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the - * query. 
- * @return options - */ - public AqlQueryOptions fullCount(final Boolean fullCount) { - getOptions().fullCount = fullCount; - return this; - } - - public Integer getMaxPlans() { - return options != null ? options.maxPlans : null; - } - - /** - * - * @param maxPlans - * Limits the maximum number of plans that are created by the AQL query optimizer. - * @return options - */ - public AqlQueryOptions maxPlans(final Integer maxPlans) { - getOptions().maxPlans = maxPlans; - return this; - } - - public Collection getRules() { - return options != null ? options.optimizer != null ? options.optimizer.rules : null : null; - } - - /** - * - * @param rules - * A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the - * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to enable - * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules - * @return options - */ - public AqlQueryOptions rules(final Collection rules) { - getOptions().getOptimizer().rules = rules; - return this; - } - - private Options getOptions() { - if (options == null) { - options = new Options(); - } - return options; - } - - private static class Options { - private Boolean failOnWarning; - private Boolean profile; - private Long maxTransactionSize; - private Long maxWarningCount; - private Long intermediateCommitCount; - private Long intermediateCommitSize; - private Double satelliteSyncWait; - private Boolean skipInaccessibleCollections; - private Optimizer optimizer; - private Boolean fullCount; - private Integer maxPlans; - - protected Optimizer getOptimizer() { - if (optimizer == null) { - optimizer = new Optimizer(); - } - return optimizer; - } - - } - - private static class Optimizer { - private Collection rules; - } - -} diff --git a/src/main/java/com/arangodb/model/CollectionCreateOptions.java b/src/main/java/com/arangodb/model/CollectionCreateOptions.java deleted file 
mode 100644 index a8604c19b..000000000 --- a/src/main/java/com/arangodb/model/CollectionCreateOptions.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.KeyOptions; -import com.arangodb.entity.KeyType; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CollectionCreateOptions { - - private String name; - private Long journalSize; - private Integer replicationFactor; - private KeyOptions keyOptions; - private Boolean waitForSync; - private Boolean doCompact; - private Boolean isVolatile; - private String[] shardKeys; - private Integer numberOfShards; - private Boolean isSystem; - private CollectionType type; - private Integer indexBuckets; - private String distributeShardsLike; - - public CollectionCreateOptions() { - super(); - } - - protected String getName() { - return name; - } - - /** - * @param name - * The name of the collection - * @return options - */ - protected CollectionCreateOptions name(final String name) { - this.name = name; - return this; - } - - public Long getJournalSize() { - return journalSize; - } - - /** - * @param journalSize - * The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). 
- * @return options - */ - public CollectionCreateOptions journalSize(final Long journalSize) { - this.journalSize = journalSize; - return this; - } - - public Integer getReplicationFactor() { - return replicationFactor; - } - - /** - * @param replicationFactor - * (The default is 1): in a cluster, this attribute determines how many copies of each shard are kept on - * different DBServers. The value 1 means that only one copy (no synchronous replication) is kept. A - * value of k means that k-1 replicas are kept. Any two copies reside on different DBServers. Replication - * between them is synchronous, that is, every write operation to the "leader" copy will be replicated to - * all "follower" replicas, before the write operation is reported successful. If a server fails, this is - * detected automatically and one of the servers holding copies take over, usually without an error being - * reported. - * @return options - */ - public CollectionCreateOptions replicationFactor(final Integer replicationFactor) { - this.replicationFactor = replicationFactor; - return this; - } - - public KeyOptions getKeyOptions() { - return keyOptions; - } - - /** - * @param allowUserKeys - * if set to true, then it is allowed to supply own key values in the _key attribute of a document. If - * set to false, then the key generator will solely be responsible for generating keys and supplying own - * key values in the _key attribute of documents is considered an error. - * @param type - * specifies the type of the key generator. The currently available generators are traditional and - * autoincrement. - * @param increment - * increment value for autoincrement key generator. Not used for other key generator types. - * @param offset - * Initial offset value for autoincrement key generator. Not used for other key generator types. 
- * @return options - */ - public CollectionCreateOptions keyOptions( - final Boolean allowUserKeys, - final KeyType type, - final Integer increment, - final Integer offset) { - this.keyOptions = new KeyOptions(allowUserKeys, type, increment, offset); - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * If true then the data is synchronized to disk before returning from a document create, update, replace - * or removal operation. (default: false) - * @return options - */ - public CollectionCreateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public Boolean getDoCompact() { - return doCompact; - } - - /** - * @param doCompact - * whether or not the collection will be compacted (default is true) - * @return options - */ - public CollectionCreateOptions doCompact(final Boolean doCompact) { - this.doCompact = doCompact; - return this; - } - - public Boolean getIsVolatile() { - return isVolatile; - } - - /** - * @param isVolatile - * If true then the collection data is kept in-memory only and not made persistent. Unloading the - * collection will cause the collection data to be discarded. Stopping or re-starting the server will - * also cause full loss of data in the collection. Setting this option will make the resulting collection - * be slightly faster than regular collections because ArangoDB does not enforce any synchronization to - * disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option - * should therefore be used for cache-type collections only, and not for data that cannot be re-created - * otherwise. 
(The default is false) - * @return options - */ - public CollectionCreateOptions isVolatile(final Boolean isVolatile) { - this.isVolatile = isVolatile; - return this; - } - - public String[] getShardKeys() { - return shardKeys; - } - - /** - * @param shardKeys - * (The default is [ "_key" ]): in a cluster, this attribute determines which document attributes are - * used to determine the target shard for documents. Documents are sent to shards based on the values of - * their shard key attributes. The values of all shard key attributes in a document are hashed, and the - * hash value is used to determine the target shard. Note: Values of shard key attributes cannot be - * changed once set. This option is meaningless in a single server setup. - * @return options - */ - public CollectionCreateOptions shardKeys(final String... shardKeys) { - this.shardKeys = shardKeys; - return this; - } - - public Integer getNumberOfShards() { - return numberOfShards; - } - - /** - * @param numberOfShards - * (The default is 1): in a cluster, this value determines the number of shards to create for the - * collection. In a single server setup, this option is meaningless. - * @return options - */ - public CollectionCreateOptions numberOfShards(final Integer numberOfShards) { - this.numberOfShards = numberOfShards; - return this; - } - - public Boolean getIsSystem() { - return isSystem; - } - - /** - * @param isSystem - * If true, create a system collection. In this case collection-name should start with an underscore. End - * users should normally create non-system collections only. API implementors may be required to create - * system collections in very special occasions, but normally a regular collection will do. 
(The default - * is false) - * @return options - */ - public CollectionCreateOptions isSystem(final Boolean isSystem) { - this.isSystem = isSystem; - return this; - } - - public CollectionType getType() { - return type; - } - - /** - * @param type - * (The default is {@link CollectionType#DOCUMENT}): the type of the collection to create. - * @return options - */ - public CollectionCreateOptions type(final CollectionType type) { - this.type = type; - return this; - } - - public Integer getIndexBuckets() { - return indexBuckets; - } - - /** - * @param indexBuckets - * The: number of buckets into which indexes using a hash table are split. The default is 16 and this - * number has to be a power of 2 and less than or equal to 1024. For very large collections one should - * increase this to avoid long pauses when the hash table has to be initially built or resized, since - * buckets are resized individually and can be initially built in parallel. For example, 64 might be a - * sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects - * this value, but other index types might follow in future ArangoDB versions. Changes (see below) are - * applied when the collection is loaded the next time. - * @return options - */ - public CollectionCreateOptions indexBuckets(final Integer indexBuckets) { - this.indexBuckets = indexBuckets; - return this; - } - - public String getDistributeShardsLike() { - return distributeShardsLike; - } - - /** - * @param distributeShardsLike - * (The default is ""): in an enterprise cluster, this attribute binds the specifics of sharding for the - * newly created collection to follow that of a specified existing collection. Note: Using this parameter - * has consequences for the prototype collection. It can no longer be dropped, before sharding imitating - * collections are dropped. 
Equally, backups and restores of imitating collections alone will generate - * warnings, which can be overridden, about missing sharding prototype. - * @return options - */ - public CollectionCreateOptions distributeShardsLike(final String distributeShardsLike) { - this.distributeShardsLike = distributeShardsLike; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java b/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java deleted file mode 100644 index 410409fc9..000000000 --- a/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class CollectionPropertiesOptions { - - private Boolean waitForSync; - private Long journalSize; - - public CollectionPropertiesOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * If true then creating or changing a document will wait until the data has been synchronized to disk. 
- * @return options - */ - public CollectionPropertiesOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public Long getJournalSize() { - return journalSize; - } - - /** - * @param journalSize - * The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note - * that when changing the journalSize value, it will only have an effect for additional journals or - * datafiles that are created. Already existing journals or datafiles will not be affected. - * @return options - */ - public CollectionPropertiesOptions journalSize(final Long journalSize) { - this.journalSize = journalSize; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentCreateOptions.java b/src/main/java/com/arangodb/model/DocumentCreateOptions.java deleted file mode 100644 index dba68dbd9..000000000 --- a/src/main/java/com/arangodb/model/DocumentCreateOptions.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentCreateOptions { - - private Boolean waitForSync; - private Boolean returnNew; - - public DocumentCreateOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. - * @return options - */ - public DocumentCreateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public Boolean getReturnNew() { - return returnNew; - } - - /** - * @param returnNew - * Return additionally the complete new document under the attribute new in the result. - * @return options - */ - public DocumentCreateOptions returnNew(final Boolean returnNew) { - this.returnNew = returnNew; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentDeleteOptions.java b/src/main/java/com/arangodb/model/DocumentDeleteOptions.java deleted file mode 100644 index b81e37bf3..000000000 --- a/src/main/java/com/arangodb/model/DocumentDeleteOptions.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentDeleteOptions { - - private Boolean waitForSync; - private String ifMatch; - private Boolean returnOld; - - public DocumentDeleteOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until deletion operation has been synced to disk. - * @return options - */ - public DocumentDeleteOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * remove a document based on a target revision - * @return options - */ - public DocumentDeleteOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } - - public Boolean getReturnOld() { - return returnOld; - } - - /** - * @param returnOld - * Return additionally the complete previous revision of the changed document under the attribute old in - * the result. - * @return options - */ - public DocumentDeleteOptions returnOld(final Boolean returnOld) { - this.returnOld = returnOld; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentExistsOptions.java b/src/main/java/com/arangodb/model/DocumentExistsOptions.java deleted file mode 100644 index 17d7784b5..000000000 --- a/src/main/java/com/arangodb/model/DocumentExistsOptions.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentExistsOptions { - - private String ifNoneMatch; - private String ifMatch; - private boolean catchException; - - public DocumentExistsOptions() { - super(); - catchException = true; - } - - public String getIfNoneMatch() { - return ifNoneMatch; - } - - /** - * @param ifNoneMatch - * document revision must not contain If-None-Match - * @return options - */ - public DocumentExistsOptions ifNoneMatch(final String ifNoneMatch) { - this.ifNoneMatch = ifNoneMatch; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * document revision must contain If-Match - * @return options - */ - public DocumentExistsOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } - - public boolean isCatchException() { - return catchException; - } - - /** - * @param catchException - * whether or not catch possible thrown exceptions - * @return options - */ - public DocumentExistsOptions catchException(final boolean catchException) { - this.catchException = catchException; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentImportOptions.java b/src/main/java/com/arangodb/model/DocumentImportOptions.java deleted file mode 100644 index 6bd658386..000000000 --- a/src/main/java/com/arangodb/model/DocumentImportOptions.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 
2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - */ -public class DocumentImportOptions { - - public enum OnDuplicate { - error, update, replace, ignore - } - - private String fromPrefix; - private String toPrefix; - private Boolean overwrite; - private Boolean waitForSync; - private OnDuplicate onDuplicate; - private Boolean complete; - private Boolean details; - - public DocumentImportOptions() { - super(); - } - - public String getFromPrefix() { - return fromPrefix; - } - - /** - * @param fromPrefix - * An optional prefix for the values in _from attributes. If specified, the value is automatically - * prepended to each _from input value. This allows specifying just the keys for _from. - * @return options - */ - public DocumentImportOptions fromPrefix(final String fromPrefix) { - this.fromPrefix = fromPrefix; - return this; - } - - public String getToPrefix() { - return toPrefix; - } - - /** - * @param toPrefix - * An optional prefix for the values in _to attributes. If specified, the value is automatically - * prepended to each _to input value. This allows specifying just the keys for _to. 
- * @return options - */ - public DocumentImportOptions toPrefix(final String toPrefix) { - this.toPrefix = toPrefix; - return this; - } - - public Boolean getOverwrite() { - return overwrite; - } - - /** - * @param overwrite - * If this parameter has a value of true, then all data in the collection will be removed prior to the - * import. Note that any existing index definitions will be preseved. - * @return options - */ - public DocumentImportOptions overwrite(final Boolean overwrite) { - this.overwrite = overwrite; - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until documents have been synced to disk before returning. - * @return options - */ - public DocumentImportOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public OnDuplicate getOnDuplicate() { - return onDuplicate; - } - - /** - * @param onDuplicate - * Controls what action is carried out in case of a unique key constraint violation. Possible values are: - *
    - *
  • error: this will not import the current document because of the unique key constraint violation. - * This is the default setting.
  • - *
  • update: this will update an existing document in the database with the data specified in the - * request. Attributes of the existing document that are not present in the request will be - * preseved.
  • - *
  • replace: this will replace an existing document in the database with the data specified in the - * request.
  • - *
  • ignore: this will not update an existing document and simply ignore the error caused by the unique - * key constraint violation. Note that update, replace and ignore will only work when the import document - * in the request contains the _key attribute. update and replace may also fail because of secondary - * unique key constraint violations.
  • - *
- * @return options - */ - public DocumentImportOptions onDuplicate(final OnDuplicate onDuplicate) { - this.onDuplicate = onDuplicate; - return this; - } - - public Boolean getComplete() { - return complete; - } - - /** - * @param complete - * If set to true, it will make the whole import fail if any error occurs. Otherwise the import will - * continue even if some documents cannot be imported. - * @return options - */ - public DocumentImportOptions complete(final Boolean complete) { - this.complete = complete; - return this; - } - - public Boolean getDetails() { - return details; - } - - /** - * @param details - * If set to true, the result will include an attribute details with details about documents that could - * not be imported. - * @return options - */ - public DocumentImportOptions details(final Boolean details) { - this.details = details; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentReadOptions.java b/src/main/java/com/arangodb/model/DocumentReadOptions.java deleted file mode 100644 index 5f9b40dee..000000000 --- a/src/main/java/com/arangodb/model/DocumentReadOptions.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentReadOptions { - - private String ifNoneMatch; - private String ifMatch; - private boolean catchException; - - public DocumentReadOptions() { - super(); - catchException = true; - } - - public String getIfNoneMatch() { - return ifNoneMatch; - } - - /** - * @param ifNoneMatch - * document revision must not contain If-None-Match - * @return options - */ - public DocumentReadOptions ifNoneMatch(final String ifNoneMatch) { - this.ifNoneMatch = ifNoneMatch; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * document revision must contain If-Match - * @return options - */ - public DocumentReadOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } - - public boolean isCatchException() { - return catchException; - } - - /** - * @param catchException - * whether or not catch possible thrown exceptions - * @return options - */ - public DocumentReadOptions catchException(final boolean catchException) { - this.catchException = catchException; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentReplaceOptions.java b/src/main/java/com/arangodb/model/DocumentReplaceOptions.java deleted file mode 100644 index 047517ca4..000000000 --- a/src/main/java/com/arangodb/model/DocumentReplaceOptions.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentReplaceOptions { - - private Boolean waitForSync; - private Boolean ignoreRevs; - private String ifMatch; - private Boolean returnNew; - private Boolean returnOld; - - public DocumentReplaceOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. - * @return options - */ - public DocumentReplaceOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public Boolean getIgnoreRevs() { - return ignoreRevs; - } - - /** - * @param ignoreRevs - * By default, or if this is set to true, the _rev attributes in the given document is ignored. If this - * is set to false, then the _rev attribute given in the body document is taken as a precondition. The - * document is only replaced if the current revision is the one specified. 
- * @return options - */ - public DocumentReplaceOptions ignoreRevs(final Boolean ignoreRevs) { - this.ignoreRevs = ignoreRevs; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * replace a document based on target revision - * @return options - */ - public DocumentReplaceOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } - - public Boolean getReturnNew() { - return returnNew; - } - - /** - * @param returnNew - * Return additionally the complete new document under the attribute new in the result. - * @return options - */ - public DocumentReplaceOptions returnNew(final Boolean returnNew) { - this.returnNew = returnNew; - return this; - } - - public Boolean getReturnOld() { - return returnOld; - } - - /** - * @param returnOld - * Return additionally the complete previous revision of the changed document under the attribute old in - * the result. - * @return options - */ - public DocumentReplaceOptions returnOld(final Boolean returnOld) { - this.returnOld = returnOld; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/DocumentUpdateOptions.java b/src/main/java/com/arangodb/model/DocumentUpdateOptions.java deleted file mode 100644 index c384419f5..000000000 --- a/src/main/java/com/arangodb/model/DocumentUpdateOptions.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class DocumentUpdateOptions { - - private Boolean keepNull; - private Boolean mergeObjects; - private Boolean waitForSync; - private Boolean ignoreRevs; - private String ifMatch; - private Boolean returnNew; - private Boolean returnOld; - private Boolean serializeNull; - - public DocumentUpdateOptions() { - super(); - } - - public Boolean getKeepNull() { - return keepNull; - } - - /** - * @param keepNull - * If the intention is to delete existing attributes with the patch command, the URL query parameter - * keepNull can be used with a value of false. This will modify the behavior of the patch command to - * remove any attributes from the existing document that are contained in the patch document with an - * attribute value of null. - * @return options - */ - public DocumentUpdateOptions keepNull(final Boolean keepNull) { - this.keepNull = keepNull; - return this; - } - - public Boolean getMergeObjects() { - return mergeObjects; - } - - /** - * @param mergeObjects - * Controls whether objects (not arrays) will be merged if present in both the existing and the patch - * document. If set to false, the value in the patch document will overwrite the existing document's - * value. If set to true, objects will be merged. The default is true. - * @return options - */ - public DocumentUpdateOptions mergeObjects(final Boolean mergeObjects) { - this.mergeObjects = mergeObjects; - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. 
- * @return options - */ - public DocumentUpdateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public Boolean getIgnoreRevs() { - return ignoreRevs; - } - - /** - * @param ignoreRevs - * By default, or if this is set to true, the _rev attributes in the given document is ignored. If this - * is set to false, then the _rev attribute given in the body document is taken as a precondition. The - * document is only updated if the current revision is the one specified. - * @return options - */ - public DocumentUpdateOptions ignoreRevs(final Boolean ignoreRevs) { - this.ignoreRevs = ignoreRevs; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * update a document based on target revision - * @return options - */ - public DocumentUpdateOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } - - public Boolean getReturnNew() { - return returnNew; - } - - /** - * @param returnNew - * Return additionally the complete new document under the attribute new in the result. - * @return options - */ - public DocumentUpdateOptions returnNew(final Boolean returnNew) { - this.returnNew = returnNew; - return this; - } - - public Boolean getReturnOld() { - return returnOld; - } - - /** - * @param returnOld - * Return additionally the complete previous revision of the changed document under the attribute old in - * the result. - * @return options - */ - public DocumentUpdateOptions returnOld(final Boolean returnOld) { - this.returnOld = returnOld; - return this; - } - - public Boolean getSerializeNull() { - return serializeNull; - } - - /** - * @param serializeNull - * By default, or if this is set to true, all fields of the document which have null values are - * serialized to VelocyPack otherwise they are excluded from serialization. Use this to update single - * fields from a stored document. 
- * @return options - */ - public DocumentUpdateOptions serializeNull(final Boolean serializeNull) { - this.serializeNull = serializeNull; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/EdgeCreateOptions.java b/src/main/java/com/arangodb/model/EdgeCreateOptions.java deleted file mode 100644 index 8830f92c2..000000000 --- a/src/main/java/com/arangodb/model/EdgeCreateOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeCreateOptions { - - private Boolean waitForSync; - - public EdgeCreateOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. 
- * @return options - */ - public EdgeCreateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/EdgeDeleteOptions.java b/src/main/java/com/arangodb/model/EdgeDeleteOptions.java deleted file mode 100644 index b57e5ba05..000000000 --- a/src/main/java/com/arangodb/model/EdgeDeleteOptions.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeDeleteOptions { - - private Boolean waitForSync; - private String ifMatch; - - public EdgeDeleteOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until deletion operation has been synced to disk. 
- * @return options - */ - public EdgeDeleteOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * remove a document based on a target revision - * @return options - */ - public EdgeDeleteOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/model/EdgeReplaceOptions.java b/src/main/java/com/arangodb/model/EdgeReplaceOptions.java deleted file mode 100644 index 0eb16ebbe..000000000 --- a/src/main/java/com/arangodb/model/EdgeReplaceOptions.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeReplaceOptions { - - private Boolean waitForSync; - private String ifMatch; - - public EdgeReplaceOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. 
- * @return options - */ - public EdgeReplaceOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * replace a document based on target revision - * @return options - */ - public EdgeReplaceOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/model/EdgeUpdateOptions.java b/src/main/java/com/arangodb/model/EdgeUpdateOptions.java deleted file mode 100644 index 0dac5f1b8..000000000 --- a/src/main/java/com/arangodb/model/EdgeUpdateOptions.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class EdgeUpdateOptions { - - private Boolean keepNull; - private Boolean waitForSync; - private String ifMatch; - - public EdgeUpdateOptions() { - super(); - } - - public Boolean getKeepNull() { - return keepNull; - } - - /** - * @param keepNull - * If the intention is to delete existing attributes with the patch command, the URL query parameter - * keepNull can be used with a value of false. 
This will modify the behavior of the patch command to - * remove any attributes from the existing document that are contained in the patch document with an - * attribute value of null. - * @return options - */ - public EdgeUpdateOptions keepNull(final Boolean keepNull) { - this.keepNull = keepNull; - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. - * @return options - */ - public EdgeUpdateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * replace a document based on target revision - * @return options - */ - public EdgeUpdateOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/model/FulltextIndexOptions.java b/src/main/java/com/arangodb/model/FulltextIndexOptions.java deleted file mode 100644 index 5f8602a33..000000000 --- a/src/main/java/com/arangodb/model/FulltextIndexOptions.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.IndexType; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class FulltextIndexOptions { - - private Iterable fields; - private final IndexType type = IndexType.fulltext; - private Integer minLength; - - public FulltextIndexOptions() { - super(); - } - - protected Iterable getFields() { - return fields; - } - - /** - * @param fields - * A list of attribute paths - * @return options - */ - protected FulltextIndexOptions fields(final Iterable fields) { - this.fields = fields; - return this; - } - - protected IndexType getType() { - return type; - } - - public Integer getMinLength() { - return minLength; - } - - /** - * @param minLength - * Minimum character length of words to index. Will default to a server-defined value if unspecified. It - * is thus recommended to set this value explicitly when creating the index. - * @return options - */ - public FulltextIndexOptions minLength(final Integer minLength) { - this.minLength = minLength; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/GeoIndexOptions.java b/src/main/java/com/arangodb/model/GeoIndexOptions.java deleted file mode 100644 index 526b2e436..000000000 --- a/src/main/java/com/arangodb/model/GeoIndexOptions.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.IndexType; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class GeoIndexOptions { - - private Iterable fields; - private final IndexType type = IndexType.geo; - private Boolean geoJson; - - public GeoIndexOptions() { - super(); - } - - protected Iterable getFields() { - return fields; - } - - /** - * @param fields - * A list of attribute paths - * @return options - */ - protected GeoIndexOptions fields(final Iterable fields) { - this.fields = fields; - return this; - } - - protected IndexType getType() { - return type; - } - - public Boolean getGeoJson() { - return geoJson; - } - - /** - * @param geoJson - * If a geo-spatial index on a location is constructed and geoJson is true, then the order within the - * array is longitude followed by latitude. This corresponds to the format described in - * @return options - */ - public GeoIndexOptions geoJson(final Boolean geoJson) { - this.geoJson = geoJson; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/GraphCreateOptions.java b/src/main/java/com/arangodb/model/GraphCreateOptions.java deleted file mode 100644 index 63778d833..000000000 --- a/src/main/java/com/arangodb/model/GraphCreateOptions.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Arrays; -import java.util.Collection; - -import com.arangodb.entity.EdgeDefinition; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class GraphCreateOptions { - - private String name; - private Collection edgeDefinitions; - private Collection orphanCollections; - private Boolean isSmart; - private SmartOptions options; - - public GraphCreateOptions() { - super(); - } - - protected String getName() { - return name; - } - - /** - * @param name - * Name of the graph - * @return options - */ - protected GraphCreateOptions name(final String name) { - this.name = name; - return this; - } - - public Collection getEdgeDefinitions() { - return edgeDefinitions; - } - - /** - * @param edgeDefinitions - * An array of definitions for the edge - * @return options - */ - protected GraphCreateOptions edgeDefinitions(final Collection edgeDefinitions) { - this.edgeDefinitions = edgeDefinitions; - return this; - } - - public Collection getOrphanCollections() { - return orphanCollections; - } - - /** - * @param orphanCollections - * Additional vertex collections - * @return options - */ - public GraphCreateOptions orphanCollections(final String... orphanCollections) { - this.orphanCollections = Arrays.asList(orphanCollections); - return this; - } - - public Boolean getIsSmart() { - return isSmart; - } - - /** - * - * @param isSmart - * Define if the created graph should be smart. This only has effect in Enterprise version. 
- * @return options - */ - public GraphCreateOptions isSmart(final Boolean isSmart) { - this.isSmart = isSmart; - return this; - } - - public Integer getReplicationFactor() { - return getOptions().getReplicationFactor(); - } - - /** - * @param replicationFactor - * (The default is 1): in a cluster, this attribute determines how many copies of each shard are kept on - * different DBServers. The value 1 means that only one copy (no synchronous replication) is kept. A - * value of k means that k-1 replicas are kept. Any two copies reside on different DBServers. Replication - * between them is synchronous, that is, every write operation to the "leader" copy will be replicated to - * all "follower" replicas, before the write operation is reported successful. If a server fails, this is - * detected automatically and one of the servers holding copies take over, usually without an error being - * reported. - * @return options - */ - public GraphCreateOptions replicationFactor(final Integer replicationFactor) { - getOptions().setReplicationFactor(replicationFactor); - return this; - } - - public Integer getNumberOfShards() { - return getOptions().getNumberOfShards(); - } - - /** - * @param numberOfShards - * The number of shards that is used for every collection within this graph. Cannot be modified later. - * @return options - */ - public GraphCreateOptions numberOfShards(final Integer numberOfShards) { - getOptions().setNumberOfShards(numberOfShards); - return this; - } - - public String getSmartGraphAttribute() { - return getOptions().getSmartGraphAttribute(); - } - - /** - * @param smartGraphAttribute - * The attribute name that is used to smartly shard the vertices of a graph. Every vertex in this Graph - * has to have this attribute. Cannot be modified later. 
- * @return options - */ - public GraphCreateOptions smartGraphAttribute(final String smartGraphAttribute) { - getOptions().setSmartGraphAttribute(smartGraphAttribute); - return this; - } - - private SmartOptions getOptions() { - if (options == null) { - options = new SmartOptions(); - } - return options; - } - - public static class SmartOptions { - private Integer replicationFactor; - private Integer numberOfShards; - private String smartGraphAttribute; - - public SmartOptions() { - super(); - } - - public Integer getReplicationFactor() { - return replicationFactor; - } - - public void setReplicationFactor(final Integer replicationFactor) { - this.replicationFactor = replicationFactor; - } - - public Integer getNumberOfShards() { - return numberOfShards; - } - - public void setNumberOfShards(final Integer numberOfShards) { - this.numberOfShards = numberOfShards; - } - - public String getSmartGraphAttribute() { - return smartGraphAttribute; - } - - public void setSmartGraphAttribute(final String smartGraphAttribute) { - this.smartGraphAttribute = smartGraphAttribute; - } - - } - -} diff --git a/src/main/java/com/arangodb/model/HashIndexOptions.java b/src/main/java/com/arangodb/model/HashIndexOptions.java deleted file mode 100644 index 4e4d39285..000000000 --- a/src/main/java/com/arangodb/model/HashIndexOptions.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.IndexType; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class HashIndexOptions { - - private Iterable fields; - private final IndexType type = IndexType.hash; - private Boolean unique; - private Boolean sparse; - private Boolean deduplicate; - - public HashIndexOptions() { - super(); - } - - protected Iterable getFields() { - return fields; - } - - /** - * @param fields - * A list of attribute paths - * @return options - */ - protected HashIndexOptions fields(final Iterable fields) { - this.fields = fields; - return this; - } - - protected IndexType getType() { - return type; - } - - public Boolean getUnique() { - return unique; - } - - /** - * @param unique - * if true, then create a unique index - * @return options - */ - public HashIndexOptions unique(final Boolean unique) { - this.unique = unique; - return this; - } - - public Boolean getSparse() { - return sparse; - } - - /** - * @param sparse - * if true, then create a sparse index - * @return options - */ - public HashIndexOptions sparse(final Boolean sparse) { - this.sparse = sparse; - return this; - } - - public Boolean getDeduplicate() { - return deduplicate; - } - - /** - * @param deduplicate - * if false, the deduplication of array values is turned off. 
- * @return options - */ - public HashIndexOptions deduplicate(final Boolean deduplicate) { - this.deduplicate = deduplicate; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/LogOptions.java b/src/main/java/com/arangodb/model/LogOptions.java deleted file mode 100644 index 9ba095224..000000000 --- a/src/main/java/com/arangodb/model/LogOptions.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.LogLevel; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class LogOptions { - - public static final String PROPERTY_UPTO = "upto"; - public static final String PROPERTY_LEVEL = "level"; - public static final String PROPERTY_START = "start"; - public static final String PROPERTY_SIZE = "size"; - public static final String PROPERTY_OFFSET = "offset"; - public static final String PROPERTY_SEARCH = "search"; - public static final String PROPERTY_SORT = "sort"; - - public enum SortOrder { - asc, desc - } - - private LogLevel upto; - private LogLevel level; - private Long start; - private Integer size; - private Integer offset; - private String search; - private SortOrder sort; - - public LogOptions() { - super(); - } - - public LogLevel getUpto() { - return upto; - } - - /** - * @param upto - * Returns all log entries up to log level upto - * @return options - */ - public LogOptions upto(final LogLevel upto) { - this.upto = upto; - return this; - } - - public LogLevel getLevel() { - return level; - } - - /** - * @param level - * Returns all log entries of log level level. 
Note that the query parameters upto and level are mutually - * exclusive - * @return options - */ - public LogOptions level(final LogLevel level) { - this.level = level; - return this; - } - - public Long getStart() { - return start; - } - - /** - * @param start - * Returns all log entries such that their log entry identifier (lid value) is greater or equal to start - * @return options - */ - public LogOptions start(final Long start) { - this.start = start; - return this; - } - - public Integer getSize() { - return size; - } - - /** - * @param size - * Restricts the result to at most size log entries - * @return options - */ - public LogOptions size(final Integer size) { - this.size = size; - return this; - } - - public Integer getOffset() { - return offset; - } - - /** - * @param offset - * Starts to return log entries skipping the first offset log entries. offset and size can be used for - * pagination - * @return options - */ - public LogOptions offset(final Integer offset) { - this.offset = offset; - return this; - } - - public String getSearch() { - return search; - } - - /** - * @param search - * Only return the log entries containing the text specified in search - * @return options - */ - public LogOptions search(final String search) { - this.search = search; - return this; - } - - public SortOrder getSort() { - return sort; - } - - /** - * @param sort - * Sort the log entries either ascending (if sort is asc) or descending (if sort is desc) according to - * their lid values. Note that the lid imposes a chronological order. 
The default value is asc - * @return options - */ - public LogOptions sort(final SortOrder sort) { - this.sort = sort; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/OptionsBuilder.java b/src/main/java/com/arangodb/model/OptionsBuilder.java deleted file mode 100644 index 0f33f1623..000000000 --- a/src/main/java/com/arangodb/model/OptionsBuilder.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Collection; -import java.util.Map; - -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.Permissions; - -/** - * @author Mark Vollmary - * - */ -public class OptionsBuilder { - - private OptionsBuilder() { - super(); - } - - public static UserCreateOptions build(final UserCreateOptions options, final String user, final String passwd) { - return options.user(user).passwd(passwd); - } - - public static HashIndexOptions build(final HashIndexOptions options, final Iterable fields) { - return options.fields(fields); - } - - public static SkiplistIndexOptions build(final SkiplistIndexOptions options, final Iterable fields) { - return options.fields(fields); - } - - public static PersistentIndexOptions build(final PersistentIndexOptions options, final Iterable fields) { - return options.fields(fields); - } - - public static GeoIndexOptions build(final GeoIndexOptions options, final Iterable fields) { - return options.fields(fields); - } - - public static FulltextIndexOptions build(final FulltextIndexOptions options, final Iterable fields) { - return options.fields(fields); - } - - public static CollectionCreateOptions build(final CollectionCreateOptions options, final String name) { - return options.name(name); - } - - public static AqlQueryOptions build( - final AqlQueryOptions options, - final String query, - final Map bindVars) { - return options.query(query).bindVars(bindVars); - } - - public static AqlQueryExplainOptions build( - final AqlQueryExplainOptions options, - final String query, - final Map bindVars) { - return options.query(query).bindVars(bindVars); - } - - public static AqlQueryParseOptions build(final AqlQueryParseOptions options, final String query) { - return options.query(query); - } - - public static GraphCreateOptions build( - final GraphCreateOptions options, - final String name, - final Collection 
edgeDefinitions) { - return options.name(name).edgeDefinitions(edgeDefinitions); - } - - public static TransactionOptions build(final TransactionOptions options, final String action) { - return options.action(action); - } - - public static CollectionRenameOptions build(final CollectionRenameOptions options, final String name) { - return options.name(name); - } - - public static DBCreateOptions build(final DBCreateOptions options, final String name) { - return options.name(name); - } - - public static UserAccessOptions build(final UserAccessOptions options, final Permissions grant) { - return options.grant(grant); - } - - public static AqlFunctionCreateOptions build( - final AqlFunctionCreateOptions options, - final String name, - final String code) { - return options.name(name).code(code); - } - - public static VertexCollectionCreateOptions build( - final VertexCollectionCreateOptions options, - final String collection) { - return options.collection(collection); - } - -} diff --git a/src/main/java/com/arangodb/model/PersistentIndexOptions.java b/src/main/java/com/arangodb/model/PersistentIndexOptions.java deleted file mode 100644 index e294c8782..000000000 --- a/src/main/java/com/arangodb/model/PersistentIndexOptions.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.IndexType; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class PersistentIndexOptions { - - private Iterable fields; - protected IndexType type = IndexType.persistent; - private Boolean unique; - private Boolean sparse; - - public PersistentIndexOptions() { - super(); - } - - protected Iterable getFields() { - return fields; - } - - /** - * @param fields - * A list of attribute paths - * @return options - */ - protected PersistentIndexOptions fields(final Iterable fields) { - this.fields = fields; - return this; - } - - protected IndexType getType() { - return type; - } - - public Boolean getUnique() { - return unique; - } - - /** - * @param unique - * if true, then create a unique index - * @return options - */ - public PersistentIndexOptions unique(final Boolean unique) { - this.unique = unique; - return this; - } - - public Boolean getSparse() { - return sparse; - } - - /** - * @param sparse - * if true, then create a sparse index - * @return options - */ - public PersistentIndexOptions sparse(final Boolean sparse) { - this.sparse = sparse; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/SkiplistIndexOptions.java b/src/main/java/com/arangodb/model/SkiplistIndexOptions.java deleted file mode 100644 index 7ddc8dd92..000000000 --- a/src/main/java/com/arangodb/model/SkiplistIndexOptions.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import com.arangodb.entity.IndexType; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class SkiplistIndexOptions { - - private Iterable fields; - private final IndexType type = IndexType.skiplist; - private Boolean unique; - private Boolean sparse; - private Boolean deduplicate; - - public SkiplistIndexOptions() { - super(); - } - - protected Iterable getFields() { - return fields; - } - - /** - * @param fields - * A list of attribute paths - * @return options - */ - protected SkiplistIndexOptions fields(final Iterable fields) { - this.fields = fields; - return this; - } - - protected IndexType getType() { - return type; - } - - public Boolean getUnique() { - return unique; - } - - /** - * @param unique - * if true, then create a unique index - * @return options - */ - public SkiplistIndexOptions unique(final Boolean unique) { - this.unique = unique; - return this; - } - - public Boolean getSparse() { - return sparse; - } - - /** - * @param sparse - * if true, then create a sparse index - * @return options - */ - public SkiplistIndexOptions sparse(final Boolean sparse) { - this.sparse = sparse; - return this; - } - - public Boolean getDeduplicate() { - return deduplicate; - } - - /** - * @param deduplicate - * if false, the deduplication of array values is turned off. 
- * @return options - */ - public SkiplistIndexOptions deduplicate(final Boolean deduplicate) { - this.deduplicate = deduplicate; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/TransactionOptions.java b/src/main/java/com/arangodb/model/TransactionOptions.java deleted file mode 100644 index 8dca1636f..000000000 --- a/src/main/java/com/arangodb/model/TransactionOptions.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Arrays; -import java.util.Collection; - -/** - * @author Mark Vollmary - * - * @see API - * Documentation - */ -public class TransactionOptions { - - private String action; - private Object params; - private final TransactionCollectionOptions collections; - private Integer lockTimeout; - private Boolean waitForSync; - private Long maxTransactionSize; - private Long intermediateCommitCount; - private Long intermediateCommitSize; - - public TransactionOptions() { - super(); - collections = new TransactionCollectionOptions(); - } - - protected String getAction() { - return action; - } - - /** - * @param action - * the actual transaction operations to be executed, in the form of stringified JavaScript code - * @return options - */ - protected TransactionOptions action(final String action) { - this.action = action; - return this; - } - - public Object getParams() { - return params; - } - - /** - * @param params - * optional arguments passed to action - * @return options - */ - public TransactionOptions params(final Object params) { - this.params = params; - return this; - } - - public Integer getLockTimeout() { - return lockTimeout; - } - - /** - * @param lockTimeout - * an optional numeric value that can be used to set a timeout for waiting on collection locks. If not - * specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out - * waiting for a lock. 
- * @return options - */ - public TransactionOptions lockTimeout(final Integer lockTimeout) { - this.lockTimeout = lockTimeout; - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * an optional boolean flag that, if set, will force the transaction to write all data to disk before - * returning - * @return options - */ - public TransactionOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - /** - * @param read - * contains the array of collection-names to be used in the transaction (mandatory) for read - * @return options - */ - public TransactionOptions readCollections(final String... read) { - collections.read(read); - return this; - } - - /** - * @param write - * contains the array of collection-names to be used in the transaction (mandatory) for write - * @return options - */ - public TransactionOptions writeCollections(final String... write) { - collections.write(write); - return this; - } - - public TransactionOptions allowImplicit(final Boolean allowImplicit) { - collections.allowImplicit(allowImplicit); - return this; - } - - public Long getMaxTransactionSize() { - return maxTransactionSize; - } - - /** - * @param maxTransactionSize - * Transaction size limit in bytes. Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public TransactionOptions maxTransactionSize(final Long maxTransactionSize) { - this.maxTransactionSize = maxTransactionSize; - return this; - } - - public Long getIntermediateCommitCount() { - return intermediateCommitCount; - } - - /** - * @param intermediateCommitCount - * Maximum number of operations after which an intermediate commit is performed automatically. Honored by - * the RocksDB storage engine only. 
- * @since ArangoDB 3.2.0 - * @return options - */ - public TransactionOptions intermediateCommitCount(final Long intermediateCommitCount) { - this.intermediateCommitCount = intermediateCommitCount; - return this; - } - - public Long getIntermediateCommitSize() { - return intermediateCommitSize; - } - - /** - * @param intermediateCommitSize - * Maximum total size of operations after which an intermediate commit is performed automatically. - * Honored by the RocksDB storage engine only. - * @since ArangoDB 3.2.0 - * @return options - */ - public TransactionOptions intermediateCommitSize(final Long intermediateCommitSize) { - this.intermediateCommitSize = intermediateCommitSize; - return this; - } - - public static class TransactionCollectionOptions { - - private Collection read; - private Collection write; - private Boolean allowImplicit; - - public Collection getRead() { - return read; - } - - public TransactionCollectionOptions read(final String... read) { - this.read = Arrays.asList(read); - return this; - } - - public Collection getWrite() { - return write; - } - - public TransactionCollectionOptions write(final String... write) { - this.write = Arrays.asList(write); - return this; - } - - public Boolean getAllowImplicit() { - return allowImplicit; - } - - public TransactionCollectionOptions allowImplicit(final Boolean allowImplicit) { - this.allowImplicit = allowImplicit; - return this; - } - - } - -} diff --git a/src/main/java/com/arangodb/model/TraversalOptions.java b/src/main/java/com/arangodb/model/TraversalOptions.java deleted file mode 100644 index a85684698..000000000 --- a/src/main/java/com/arangodb/model/TraversalOptions.java +++ /dev/null @@ -1,387 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class TraversalOptions { - - public static enum Direction { - outbound, inbound, any - } - - public static enum ItemOrder { - forward, backward - } - - public static enum Strategy { - depthfirst, breadthfirst - } - - public static enum UniquenessType { - none, global, path - } - - public static enum Order { - preorder, postorder, preorder_expander - } - - private String sort; - private Direction direction; - private Integer minDepth; - private String startVertex; - private String visitor; - private ItemOrder itemOrder; - private Strategy strategy; - private String filter; - private String init; - private Integer maxIterations; - private Integer maxDepth; - private Uniqueness uniqueness; - private Order order; - private String graphName; - private String expander; - private String edgeCollection; - - public String getSort() { - return sort; - } - - /** - * - * @param sort - * JavaScript code of a custom comparison function for the edges. The signature of this function is (l, - * r) -> integer (where l and r are edges) and must return -1 if l is smaller than, +1 if l is greater - * than, and 0 if l and r are equal. The reason for this is the following: The order of edges returned - * for a certain vertex is undefined. This is because there is no natural order of edges for a vertex - * with multiple connected edges. 
To explicitly define the order in which edges on the vertex are - * followed, you can specify an edge comparator function with this attribute. Note that the value here - * has to be a string to conform to the JSON standard, which in turn is parsed as function body on the - * server side. Furthermore note that this attribute is only used for the standard expanders. If you use - * your custom expander you have to do the sorting yourself within the expander code. - * @return options - */ - public TraversalOptions sort(final String sort) { - this.sort = sort; - return this; - } - - public Direction getDirection() { - return direction; - } - - /** - * - * @param direction - * direction for traversal - * - * if set, must be either "outbound", "inbound", or "any" - * - * if not set, the expander attribute must be specified - * @return options - */ - public TraversalOptions direction(final Direction direction) { - this.direction = direction; - return this; - } - - public Integer getMinDepth() { - return minDepth; - } - - /** - * - * @param minDepth - * ANDed with any existing filters): visits only nodes in at least the given depth - * @return options - */ - public TraversalOptions minDepth(final Integer minDepth) { - this.minDepth = minDepth; - return this; - } - - public String getStartVertex() { - return startVertex; - } - - /** - * - * @param startVertex - * The id of the startVertex, e.g. "users/foo". - * @return options - */ - public TraversalOptions startVertex(final String startVertex) { - this.startVertex = startVertex; - return this; - } - - public String getVisitor() { - return visitor; - } - - /** - * - * @param visitor - * JavaScript code of custom visitor function function signature: (config, result, vertex, path, - * connected) -> void The visitor function can do anything, but its return value is ignored. To populate - * a result, use the result variable by reference. 
Note that the connected argument is only populated - * when the order attribute is set to "preorder-expander". - * @return options - */ - public TraversalOptions visitor(final String visitor) { - this.visitor = visitor; - return this; - } - - public ItemOrder getItemOrder() { - return itemOrder; - } - - /** - * - * @param itemOrder - * The item iteration order can be "forward" or "backward" - * @return options - */ - public TraversalOptions itemOrder(final ItemOrder itemOrder) { - this.itemOrder = itemOrder; - return this; - } - - public Strategy getStrategy() { - return strategy; - } - - /** - * - * @param strategy - * The traversal strategy can be "depthfirst" or "breadthfirst" - * @return options - */ - public TraversalOptions strategy(final Strategy strategy) { - this.strategy = strategy; - return this; - } - - public String getFilter() { - return filter; - } - - /** - * - * @param filter - * default is to include all nodes: body (JavaScript code) of custom filter function function signature: - * (config, vertex, path) -> mixed can return four different string values: - * - * "exclude" -> this vertex will not be visited. - * - * "prune" -> the edges of this vertex will not be followed. - * - * "" or undefined -> visit the vertex and follow it's edges. - * - * Array -> containing any combination of the above. - * - * If there is at least one "exclude" or "prune" respectivly is contained, it's effect will occur. 
- * @return options - */ - public TraversalOptions filter(final String filter) { - this.filter = filter; - return this; - } - - public String getInit() { - return init; - } - - /** - * - * @param init - * JavaScript code of custom result initialization function function signature: (config, result) -> void - * initialize any values in result with what is required - * @return options - */ - public TraversalOptions init(final String init) { - this.init = init; - return this; - } - - public Integer getMaxIterations() { - return maxIterations; - } - - /** - * - * @param maxIterations - * Maximum number of iterations in each traversal. This number can be set to prevent endless loops in - * traversal of cyclic graphs. When a traversal performs as many iterations as the maxIterations value, - * the traversal will abort with an error. If maxIterations is not set, a server-defined value may be - * used. - * @return options - */ - public TraversalOptions maxIterations(final Integer maxIterations) { - this.maxIterations = maxIterations; - return this; - } - - public Integer getMaxDepth() { - return maxDepth; - } - - /** - * - * @param maxDepth - * ANDed with any existing filters visits only nodes in at most the given depth. - * @return options - */ - public TraversalOptions maxDepth(final Integer maxDepth) { - this.maxDepth = maxDepth; - return this; - } - - public UniquenessType getVerticesUniqueness() { - return uniqueness != null ? uniqueness.vertices : null; - } - - /** - * - * @param vertices - * Specifies uniqueness for vertices can be "none", "global" or "path" - * @return options - */ - public TraversalOptions verticesUniqueness(final UniquenessType vertices) { - getUniqueness().setVertices(vertices); - return this; - } - - public UniquenessType getEdgesUniqueness() { - return uniqueness != null ? 
uniqueness.edges : null; - } - - /** - * - * @param edges - * Specifies uniqueness for edges can be "none", "global" or "path" - * @return options - */ - public TraversalOptions edgesUniqueness(final UniquenessType edges) { - getUniqueness().setEdges(edges); - return this; - } - - public Order getOrder() { - return order; - } - - /** - * - * @param order - * The traversal order can be "preorder", "postorder" or "preorder-expander" - * @return options - */ - public TraversalOptions order(final Order order) { - this.order = order; - return this; - } - - public String getGraphName() { - return graphName; - } - - /** - * - * @param graphName - * The name of the graph that contains the edges. Either edgeCollection or graphName has to be given. In - * case both values are set the graphName is prefered. - * @return options - */ - public TraversalOptions graphName(final String graphName) { - this.graphName = graphName; - return this; - } - - public String getExpander() { - return expander; - } - - /** - * - * @param expander - * JavaScript code of custom expander function must be set if direction attribute is not set function - * signature: (config, vertex, path) -> array expander must return an array of the connections for vertex - * each connection is an object with the attributes edge and vertex - * @return options - */ - public TraversalOptions expander(final String expander) { - this.expander = expander; - return this; - } - - public String getEdgeCollection() { - return edgeCollection; - } - - /** - * - * @param edgeCollection - * The name of the collection that contains the edges. 
- * @return options - */ - public TraversalOptions edgeCollection(final String edgeCollection) { - this.edgeCollection = edgeCollection; - return this; - } - - public static class Uniqueness { - - private UniquenessType vertices; - private UniquenessType edges; - - public UniquenessType getVertices() { - return vertices; - } - - public void setVertices(final UniquenessType vertices) { - this.vertices = vertices; - } - - public UniquenessType getEdges() { - return edges; - } - - public void setEdges(final UniquenessType edges) { - this.edges = edges; - } - } - - private Uniqueness getUniqueness() { - if (uniqueness == null) { - uniqueness = new Uniqueness(); - uniqueness.vertices = UniquenessType.none; - uniqueness.edges = UniquenessType.none; - } - - return uniqueness; - } - -} diff --git a/src/main/java/com/arangodb/model/UserCreateOptions.java b/src/main/java/com/arangodb/model/UserCreateOptions.java deleted file mode 100644 index 2dfecc0c8..000000000 --- a/src/main/java/com/arangodb/model/UserCreateOptions.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Map; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class UserCreateOptions { - - private String user; - private String passwd; - private Boolean active; - private Map extra; - - public UserCreateOptions() { - super(); - } - - protected String getUser() { - return user; - } - - /** - * @param user - * The name of the user - * @return options - */ - protected UserCreateOptions user(final String user) { - this.user = user; - return this; - } - - protected String getPasswd() { - return passwd; - } - - /** - * @param passwd - * The user password - * @return options - */ - protected UserCreateOptions passwd(final String passwd) { - this.passwd = passwd; - return this; - } - - public Boolean getActive() { - return active; - } - - /** - * @param active - * An optional flag that specifies whether the user is active. If not specified, this will default to - * true - * @return options - */ - public UserCreateOptions active(final Boolean active) { - this.active = active; - return this; - } - - public Map getExtra() { - return extra; - } - - /** - * @param extra - * Optional data about the user - * @return options - */ - public UserCreateOptions extra(final Map extra) { - this.extra = extra; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/UserUpdateOptions.java b/src/main/java/com/arangodb/model/UserUpdateOptions.java deleted file mode 100644 index 33d3b18f3..000000000 --- a/src/main/java/com/arangodb/model/UserUpdateOptions.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -import java.util.Map; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class UserUpdateOptions { - - private String passwd; - private Boolean active; - private Map extra; - - public UserUpdateOptions() { - super(); - } - - public String getPasswd() { - return passwd; - } - - /** - * @param passwd - * The user password - * @return options - */ - public UserUpdateOptions passwd(final String passwd) { - this.passwd = passwd; - return this; - } - - public Boolean getActive() { - return active; - } - - /** - * @param active - * An optional flag that specifies whether the user is active. 
If not specified, this will default to - * true - * @return options - */ - public UserUpdateOptions active(final Boolean active) { - this.active = active; - return this; - } - - public Map getExtra() { - return extra; - } - - /** - * @param extra - * Optional data about the user - * @return options - */ - public UserUpdateOptions extra(final Map extra) { - this.extra = extra; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/VertexCreateOptions.java b/src/main/java/com/arangodb/model/VertexCreateOptions.java deleted file mode 100644 index c0af4aece..000000000 --- a/src/main/java/com/arangodb/model/VertexCreateOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class VertexCreateOptions { - - private Boolean waitForSync; - - public VertexCreateOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. 
- * @return options - */ - public VertexCreateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - -} diff --git a/src/main/java/com/arangodb/model/VertexDeleteOptions.java b/src/main/java/com/arangodb/model/VertexDeleteOptions.java deleted file mode 100644 index 19fb696d7..000000000 --- a/src/main/java/com/arangodb/model/VertexDeleteOptions.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class VertexDeleteOptions { - - private Boolean waitForSync; - private String ifMatch; - - public VertexDeleteOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until deletion operation has been synced to disk. 
- * @return options - */ - public VertexDeleteOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * remove a document based on a target revision - * @return options - */ - public VertexDeleteOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/model/VertexReplaceOptions.java b/src/main/java/com/arangodb/model/VertexReplaceOptions.java deleted file mode 100644 index b8fb6da39..000000000 --- a/src/main/java/com/arangodb/model/VertexReplaceOptions.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class VertexReplaceOptions { - - private Boolean waitForSync; - private String ifMatch; - - public VertexReplaceOptions() { - super(); - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. 
- * @return options - */ - public VertexReplaceOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * replace a document based on target revision - * @return options - */ - public VertexReplaceOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/model/VertexUpdateOptions.java b/src/main/java/com/arangodb/model/VertexUpdateOptions.java deleted file mode 100644 index a90fa1ac3..000000000 --- a/src/main/java/com/arangodb/model/VertexUpdateOptions.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.model; - -/** - * @author Mark Vollmary - * - * @see API Documentation - */ -public class VertexUpdateOptions { - - private Boolean keepNull; - private Boolean waitForSync; - private String ifMatch; - - public VertexUpdateOptions() { - super(); - } - - public Boolean getKeepNull() { - return keepNull; - } - - /** - * @param keepNull - * If the intention is to delete existing attributes with the patch command, the URL query parameter - * keepNull can be used with a value of false. 
This will modify the behavior of the patch command to - * remove any attributes from the existing document that are contained in the patch document with an - * attribute value of null. - * @return options - */ - public VertexUpdateOptions keepNull(final Boolean keepNull) { - this.keepNull = keepNull; - return this; - } - - public Boolean getWaitForSync() { - return waitForSync; - } - - /** - * @param waitForSync - * Wait until document has been synced to disk. - * @return options - */ - public VertexUpdateOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; - return this; - } - - public String getIfMatch() { - return ifMatch; - } - - /** - * @param ifMatch - * replace a document based on target revision - * @return options - */ - public VertexUpdateOptions ifMatch(final String ifMatch) { - this.ifMatch = ifMatch; - return this; - } -} diff --git a/src/main/java/com/arangodb/util/ArangoCursorInitializer.java b/src/main/java/com/arangodb/util/ArangoCursorInitializer.java deleted file mode 100644 index faf4ea5a6..000000000 --- a/src/main/java/com/arangodb/util/ArangoCursorInitializer.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import com.arangodb.ArangoCursor; -import com.arangodb.entity.CursorEntity; -import com.arangodb.internal.ArangoCursorExecute; -import com.arangodb.internal.InternalArangoDatabase; - -/** - * @author Mark Vollmary - * - */ -public interface ArangoCursorInitializer { - - ArangoCursor createInstance( - final InternalArangoDatabase db, - final ArangoCursorExecute execute, - final Class type, - final CursorEntity result); - -} diff --git a/src/main/java/com/arangodb/util/ArangoSerializer.java b/src/main/java/com/arangodb/util/ArangoSerializer.java deleted file mode 100644 index bc5de232f..000000000 --- a/src/main/java/com/arangodb/util/ArangoSerializer.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import java.lang.reflect.Type; -import java.util.Collections; -import java.util.Map; - -import com.arangodb.ArangoDBException; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public interface ArangoSerializer { - - public static class Options { - private Type type; - private boolean serializeNullValues; - private Map additionalFields; - private boolean stringAsJson; - - public Options() { - super(); - serializeNullValues = false; - stringAsJson = false; - additionalFields = Collections. emptyMap(); - } - - /** - * - * @param type - * The source type of the Object. - * @return options - */ - public Options type(final Type type) { - this.type = type; - return this; - } - - /** - * - * @param serializeNullValues - * Whether or not null values should be excluded from serialization. - * @return options - */ - public Options serializeNullValues(final boolean serializeNullValues) { - this.serializeNullValues = serializeNullValues; - return this; - } - - /** - * - * @param additionalFields - * Additional Key/Value pairs to include in the created VelocyPack. - * @return options - */ - public Options additionalFields(final Map additionalFields) { - this.additionalFields = additionalFields; - return this; - } - - /** - * - * @param stringAsJson - * Wheter or not String should be interpreted as json - * @return options - */ - public Options stringAsJson(final boolean stringAsJson) { - this.stringAsJson = stringAsJson; - return this; - } - - public Type getType() { - return type; - } - - public boolean isSerializeNullValues() { - return serializeNullValues; - } - - public Map getAdditionalFields() { - return additionalFields; - } - - public boolean isStringAsJson() { - return stringAsJson; - } - - } - - /** - * Serialize a given Object to VelocyPack - * - * @param entity - * The Object to serialize. 
If it is from type String, it will be handled as a Json. - * @return The serialized VelocyPack - * @throws ArangoDBException - */ - VPackSlice serialize(final Object entity) throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack - * - * @param entity - * The Object to serialize. If it is from type String, it will be handled as a Json. - * @param options - * Additional options - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - VPackSlice serialize(final Object entity, final Options options) throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack - * - * @deprecated use {@link #serialize(Object, Options)} instead - * @param entity - * The Object to serialize. If it is from type String, it will be handled as a Json. - * @param serializeNullValues - * Whether or not null values should be excluded from serialization. - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - @Deprecated - VPackSlice serialize(final Object entity, final boolean serializeNullValues) throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack. If the Object is from type Iterable the String will be - * interpreted as Json - * - * @deprecated use {@link #serialize(Object, Options)} instead - * @param entity - * The Object to serialize. If it is from type String, it will be handled as a Json. - * @param serializeNullValues - * Whether or not null values should be excluded from serialization. - * @param stringAsJson - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - @Deprecated - VPackSlice serialize(final Object entity, final boolean serializeNullValues, final boolean stringAsJson) - throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack. This method is for serialization of types with generic parameter like - * Collection, List, Map. 
- * - * @deprecated use {@link #serialize(Object, Options)} instead - * @param entity - * The Object to serialize - * @param type - * The source type of the Object. - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - @Deprecated - VPackSlice serialize(final Object entity, final Type type) throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack. This method is for serialization of types with generic parameter like - * Collection, List, Map. - * - * @deprecated use {@link #serialize(Object, Options)} instead - * @param entity - * The Object to serialize - * @param type - * The source type of the Object. - * @param serializeNullValues - * Whether or not null values should be excluded from serialization. - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - @Deprecated - VPackSlice serialize(final Object entity, final Type type, final boolean serializeNullValues) - throws ArangoDBException; - - /** - * Serialize a given Object to VelocyPack. This method is for serialization of types with generic parameter like - * Collection, List, Map. - * - * @deprecated use {@link #serialize(Object, Options)} instead - * @param entity - * The Object to serialize - * @param type - * The source type of the Object. - * @param serializeNullValues - * Whether or not null values should be excluded from serialization. 
- * @param additionalFields - * Additional Key/Value pairs to include in the created VelocyPack - * @return the serialized VelocyPack - * @throws ArangoDBException - */ - @Deprecated - VPackSlice serialize(final Object entity, final Type type, final Map additionalFields) - throws ArangoDBException; - -} diff --git a/src/main/java/com/arangodb/velocystream/Request.java b/src/main/java/com/arangodb/velocystream/Request.java deleted file mode 100644 index 97ad7fd0b..000000000 --- a/src/main/java/com/arangodb/velocystream/Request.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.velocystream; - -import java.util.HashMap; -import java.util.Map; - -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.annotations.Expose; - -/** - * @author Mark Vollmary - * - */ -public class Request { - - private int version = 1; - private int type = 1; - private final String database; - private final RequestType requestType; - private final String request; - private Map queryParam; - private Map headerParam; - @Expose(serialize = false) - private VPackSlice body; - - public Request(final String database, final RequestType requestType, final String path) { - super(); - this.database = database; - this.requestType = requestType; - this.request = path; - body = null; - queryParam = new HashMap(); - headerParam = new HashMap(); - } - - public int getVersion() { - return version; - } - - public Request setVersion(final int version) { - this.version = version; - return this; - } - - public int getType() { - return type; - } - - public Request setType(final int type) { - this.type = type; - return this; - } - - public String getDatabase() { - return database; - } - - public RequestType getRequestType() { - return requestType; - } - - public String getRequest() { - return request; - } - - public Map getQueryParam() { - if (queryParam == null) { - queryParam = new HashMap(); - } - return queryParam; - } - - public Request putQueryParam(final String key, final Object value) { - if (value != null) { - getQueryParam().put(key, value.toString()); - } - return this; - } - - public Map getHeaderParam() { - if (headerParam == null) { - headerParam = new HashMap(); - } - return headerParam; - } - - public Request putHeaderParam(final String key, final String value) { - if (value != null) { - getHeaderParam().put(key, value); - } - return this; - } - - public VPackSlice getBody() { - return body; - } - - public Request setBody(final VPackSlice body) { - this.body = 
body; - return this; - } - -} diff --git a/src/main/java/com/arangodb/velocystream/RequestType.java b/src/main/java/com/arangodb/velocystream/RequestType.java deleted file mode 100644 index 1cb4bbc3e..000000000 --- a/src/main/java/com/arangodb/velocystream/RequestType.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.velocystream; - -/** - * @author Mark Vollmary - * - */ -public enum RequestType { - - DELETE(0), - GET(1), - POST(2), - PUT(3), - HEAD(4), - PATCH(5), - OPTIONS(6), - VSTREAM_CRED(7), - VSTREAM_REGISTER(8), - VSTREAM_STATUS(9), - ILLEGAL(10); - - private final int type; - - private RequestType(final int type) { - this.type = type; - } - - public int getType() { - return type; - } - - public static RequestType fromType(final int type) { - for (final RequestType rType : RequestType.values()) { - if (rType.type == type) { - return rType; - } - } - return null; - } -} diff --git a/src/main/java/com/arangodb/velocystream/Response.java b/src/main/java/com/arangodb/velocystream/Response.java deleted file mode 100644 index 722b196d6..000000000 --- a/src/main/java/com/arangodb/velocystream/Response.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 
2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.velocystream; - -import java.util.HashMap; -import java.util.Map; - -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.annotations.Expose; - -/** - * @author Mark Vollmary - * - */ -public class Response { - - private int version = 1; - private int type = 2; - private int responseCode; - private Map meta; - @Expose(deserialize = false) - private VPackSlice body = null; - - public Response() { - super(); - meta = new HashMap(); - } - - public int getVersion() { - return version; - } - - public void setVersion(final int version) { - this.version = version; - } - - public int getType() { - return type; - } - - public void setType(final int type) { - this.type = type; - } - - public int getResponseCode() { - return responseCode; - } - - public void setResponseCode(final int responseCode) { - this.responseCode = responseCode; - } - - public Map getMeta() { - return meta; - } - - public void setMeta(final Map meta) { - this.meta = meta; - } - - public VPackSlice getBody() { - return body; - } - - public void setBody(final VPackSlice body) { - this.body = body; - } - -} diff --git a/src/test/java/com/arangodb/ArangoCollectionTest.java b/src/test/java/com/arangodb/ArangoCollectionTest.java deleted file mode 100644 index 334a0f7c9..000000000 --- a/src/test/java/com/arangodb/ArangoCollectionTest.java +++ /dev/null @@ -1,1951 +0,0 @@ -/* - * DISCLAIMER - * - 
* Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.isOneOf; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CollectionPropertiesEntity; -import com.arangodb.entity.CollectionRevisionEntity; -import com.arangodb.entity.CollectionType; -import 
com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.entity.DocumentDeleteEntity; -import com.arangodb.entity.DocumentImportEntity; -import com.arangodb.entity.DocumentUpdateEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.IndexType; -import com.arangodb.entity.MultiDocumentEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.ServerRole; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionPropertiesOptions; -import com.arangodb.model.DocumentCreateOptions; -import com.arangodb.model.DocumentDeleteOptions; -import com.arangodb.model.DocumentExistsOptions; -import com.arangodb.model.DocumentImportOptions; -import com.arangodb.model.DocumentImportOptions.OnDuplicate; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.DocumentReplaceOptions; -import com.arangodb.model.DocumentUpdateOptions; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoCollectionTest extends BaseTest { - - private static final String COLLECTION_NAME = "db_collection_test"; - private static final String EDGE_COLLECTION_NAME = "db_edge_collection_test"; - - public ArangoCollectionTest(final Builder builder) { - super(builder); - db.createCollection(COLLECTION_NAME, null); - } - - @After - public void teardown() { - db.collection(COLLECTION_NAME).truncate(); - } - - @Test - public void insertDocument() { - final DocumentCreateEntity doc = db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), - null); - assertThat(doc, is(notNullValue())); - assertThat(doc.getId(), is(notNullValue())); - assertThat(doc.getKey(), is(notNullValue())); - assertThat(doc.getRev(), is(notNullValue())); - assertThat(doc.getNew(), is(nullValue())); - assertThat(doc.getId(), is(COLLECTION_NAME + "/" + doc.getKey())); - } - - @Test - public void insertDocumentUpdateRev() { - final BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity 
createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - assertThat(doc.getRevision(), is(createResult.getRev())); - } - - @Test - public void insertDocumentReturnNew() { - final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); - final DocumentCreateEntity doc = db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), - options); - assertThat(doc, is(notNullValue())); - assertThat(doc.getId(), is(notNullValue())); - assertThat(doc.getKey(), is(notNullValue())); - assertThat(doc.getRev(), is(notNullValue())); - assertThat(doc.getNew(), is(notNullValue())); - } - - @Test - public void insertDocumentWaitForSync() { - final DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(true); - final DocumentCreateEntity doc = db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), - options); - assertThat(doc, is(notNullValue())); - assertThat(doc.getId(), is(notNullValue())); - assertThat(doc.getKey(), is(notNullValue())); - assertThat(doc.getRev(), is(notNullValue())); - assertThat(doc.getNew(), is(nullValue())); - } - - @Test - public void insertDocumentAsJson() { - final DocumentCreateEntity doc = db.collection(COLLECTION_NAME) - .insertDocument("{\"_key\":\"docRaw\",\"a\":\"test\"}", null); - assertThat(doc, is(notNullValue())); - assertThat(doc.getId(), is(notNullValue())); - assertThat(doc.getKey(), is(notNullValue())); - assertThat(doc.getRev(), is(notNullValue())); - } - - @Test - public void getDocument() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument(new BaseDocument(), null); - assertThat(createResult.getKey(), is(notNullValue())); - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(COLLECTION_NAME + "/" + createResult.getKey())); - } - - @Test - public void 
getDocumentIfMatch() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument(new BaseDocument(), null); - assertThat(createResult.getKey(), is(notNullValue())); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch(createResult.getRev()); - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, options); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(COLLECTION_NAME + "/" + createResult.getKey())); - } - - @Test - public void getDocumentIfMatchFail() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument(new BaseDocument(), null); - assertThat(createResult.getKey(), is(notNullValue())); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch("no"); - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, options); - assertThat(document, is(nullValue())); - } - - @Test - public void getDocumentIfNoneMatch() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument(new BaseDocument(), null); - assertThat(createResult.getKey(), is(notNullValue())); - final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch("no"); - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, options); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(COLLECTION_NAME + "/" + createResult.getKey())); - } - - @Test - public void getDocumentIfNoneMatchFail() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument(new BaseDocument(), null); - assertThat(createResult.getKey(), is(notNullValue())); - final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch(createResult.getRev()); - final 
BaseDocument document = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, options); - assertThat(document, is(nullValue())); - } - - @Test - public void getDocumentAsJson() { - db.collection(COLLECTION_NAME).insertDocument("{\"_key\":\"docRaw\",\"a\":\"test\"}", null); - final String readResult = db.collection(COLLECTION_NAME).getDocument("docRaw", String.class, null); - assertThat(readResult.contains("\"_key\":\"docRaw\""), is(true)); - assertThat(readResult.contains("\"_id\":\"db_collection_test\\/docRaw\""), is(true)); - } - - @Test - public void getDocumentNotFound() { - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument("no", BaseDocument.class); - assertThat(document, is(nullValue())); - } - - @Test - public void getDocumentNotFoundOptionsDefault() { - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument("no", BaseDocument.class, - new DocumentReadOptions()); - assertThat(document, is(nullValue())); - } - - @Test - public void getDocumentNotFoundOptionsNull() { - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument("no", BaseDocument.class, null); - assertThat(document, is(nullValue())); - } - - @Test(expected = ArangoDBException.class) - public void getDocumentNotFoundThrowException() { - db.collection(COLLECTION_NAME).getDocument("no", BaseDocument.class, - new DocumentReadOptions().catchException(false)); - } - - @Test(expected = ArangoDBException.class) - public void getDocumentWrongKey() { - db.collection(COLLECTION_NAME).getDocument("no/no", BaseDocument.class); - } - - @Test - public void getDocuments() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("3")); - db.collection(COLLECTION_NAME).insertDocuments(values); - final MultiDocumentEntity documents = db.collection(COLLECTION_NAME) - .getDocuments(Arrays.asList("1", "2", "3"), BaseDocument.class); - 
assertThat(documents, is(notNullValue())); - assertThat(documents.getDocuments().size(), is(3)); - for (final BaseDocument document : documents.getDocuments()) { - assertThat(document.getId(), - isOneOf(COLLECTION_NAME + "/" + "1", COLLECTION_NAME + "/" + "2", COLLECTION_NAME + "/" + "3")); - } - } - - @Test - public void getDocumentsNotFound() { - final MultiDocumentEntity readResult = db.collection(COLLECTION_NAME) - .getDocuments(Collections.singleton("no"), BaseDocument.class); - assertThat(readResult, is(notNullValue())); - assertThat(readResult.getDocuments().size(), is(0)); - assertThat(readResult.getErrors().size(), is(1)); - } - - @Test - public void getDocumentsWrongKey() { - final MultiDocumentEntity readResult = db.collection(COLLECTION_NAME) - .getDocuments(Collections.singleton("no/no"), BaseDocument.class); - assertThat(readResult, is(notNullValue())); - assertThat(readResult.getDocuments().size(), is(0)); - assertThat(readResult.getErrors().size(), is(1)); - } - - @Test - public void updateDocument() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, null); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getNew(), is(nullValue())); - assertThat(updateResult.getOld(), is(nullValue())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - 
assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateDocumentUpdateRev() { - final BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(updateResult.getRev())); - } - - @Test - public void updateDocumentIfMatch() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch(createResult.getRev()); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - 
assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateDocumentIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - try { - final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch("no"); - db.collection(COLLECTION_NAME).updateDocument(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void updateDocumentReturnNew() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - final DocumentUpdateOptions options = new DocumentUpdateOptions().returnNew(true); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - assertThat(updateResult.getNew(), is(notNullValue())); - assertThat(updateResult.getNew().getKey(), is(createResult.getKey())); - assertThat(updateResult.getNew().getRevision(), is(not(createResult.getRev()))); - assertThat(updateResult.getNew().getAttribute("a"), 
is(notNullValue())); - assertThat(String.valueOf(updateResult.getNew().getAttribute("a")), is("test1")); - assertThat(updateResult.getNew().getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(updateResult.getNew().getAttribute("b")), is("test")); - } - - @Test - public void updateDocumentReturnOld() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - final DocumentUpdateOptions options = new DocumentUpdateOptions().returnOld(true); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - assertThat(updateResult.getOld(), is(notNullValue())); - assertThat(updateResult.getOld().getKey(), is(createResult.getKey())); - assertThat(updateResult.getOld().getRevision(), is(createResult.getRev())); - assertThat(updateResult.getOld().getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(updateResult.getOld().getAttribute("a")), is("test")); - assertThat(updateResult.getOld().getProperties().keySet(), not(hasItem("b"))); - } - - @Test - public void updateDocumentKeepNullTrue() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", null); - final DocumentUpdateOptions options = new DocumentUpdateOptions().keepNull(true); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), 
is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getProperties().keySet(), hasItem("a")); - } - - @Test - public void updateDocumentKeepNullFalse() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", null); - final DocumentUpdateOptions options = new DocumentUpdateOptions().keepNull(false); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getRevision(), is(notNullValue())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - } - - private static class TestUpdateEntity { - @SuppressWarnings("unused") - private String a, b; - } - - @Test - public void updateDocumentSerializeNullTrue() { - final TestUpdateEntity doc = new TestUpdateEntity(); - doc.a = "foo"; - doc.b = "foo"; - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc); - final TestUpdateEntity patchDoc = new TestUpdateEntity(); - patchDoc.a = "bar"; - final 
DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), patchDoc); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getKey(), is(createResult.getKey())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getProperties().keySet(), hasItem("a")); - assertThat(readResult.getAttribute("a").toString(), is("bar")); - } - - @Test - public void updateDocumentSerializeNullFalse() { - final TestUpdateEntity doc = new TestUpdateEntity(); - doc.a = "foo"; - doc.b = "foo"; - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc); - final TestUpdateEntity patchDoc = new TestUpdateEntity(); - patchDoc.a = "bar"; - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), patchDoc, new DocumentUpdateOptions().serializeNull(false)); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getKey(), is(createResult.getKey())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getProperties().keySet(), hasItems("a", "b")); - assertThat(readResult.getAttribute("a").toString(), is("bar")); - assertThat(readResult.getAttribute("b").toString(), is("foo")); - } - - @SuppressWarnings("unchecked") - @Test - public void updateDocumentMergeObjectsTrue() { - final BaseDocument doc = new BaseDocument(); - final Map a = new HashMap(); - a.put("a", "test"); - doc.addAttribute("a", a); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - a.clear(); - a.put("b", "test"); - doc.updateAttribute("a", a); - final DocumentUpdateOptions options = new 
DocumentUpdateOptions().mergeObjects(true); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - final Object aResult = readResult.getAttribute("a"); - assertThat(aResult, instanceOf(Map.class)); - final Map aMap = (Map) aResult; - assertThat(aMap.keySet(), hasItem("a")); - assertThat(aMap.keySet(), hasItem("b")); - } - - @SuppressWarnings("unchecked") - @Test - public void updateDocumentMergeObjectsFalse() { - final BaseDocument doc = new BaseDocument(); - final Map a = new HashMap(); - a.put("a", "test"); - doc.addAttribute("a", a); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - a.clear(); - a.put("b", "test"); - doc.updateAttribute("a", a); - final DocumentUpdateOptions options = new DocumentUpdateOptions().mergeObjects(false); - final DocumentUpdateEntity updateResult = db.collection(COLLECTION_NAME) - .updateDocument(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - final Object aResult = readResult.getAttribute("a"); - assertThat(aResult, instanceOf(Map.class)); - 
final Map aMap = (Map) aResult; - assertThat(aMap.keySet(), not(hasItem("a"))); - assertThat(aMap.keySet(), hasItem("b")); - } - - @Test - public void updateDocumentIgnoreRevsFalse() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.updateAttribute("a", "test1"); - doc.setRevision("no"); - try { - final DocumentUpdateOptions options = new DocumentUpdateOptions().ignoreRevs(false); - db.collection(COLLECTION_NAME).updateDocument(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void replaceDocument() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final DocumentUpdateEntity replaceResult = db.collection(COLLECTION_NAME) - .replaceDocument(createResult.getKey(), doc, null); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getNew(), is(nullValue())); - assertThat(replaceResult.getOld(), is(nullValue())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test - public void replaceDocumentUpdateRev() { - final 
BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final DocumentUpdateEntity replaceResult = db.collection(COLLECTION_NAME) - .replaceDocument(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(replaceResult.getRev())); - } - - @Test - public void replaceDocumentIfMatch() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final DocumentReplaceOptions options = new DocumentReplaceOptions().ifMatch(createResult.getRev()); - final DocumentUpdateEntity replaceResult = db.collection(COLLECTION_NAME) - .replaceDocument(createResult.getKey(), doc, options); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test - public void replaceDocumentIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - try { - final DocumentReplaceOptions 
options = new DocumentReplaceOptions().ifMatch("no"); - db.collection(COLLECTION_NAME).replaceDocument(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void replaceDocumentIgnoreRevsFalse() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - doc.setRevision("no"); - try { - final DocumentReplaceOptions options = new DocumentReplaceOptions().ignoreRevs(false); - db.collection(COLLECTION_NAME).replaceDocument(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void replaceDocumentReturnNew() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final DocumentReplaceOptions options = new DocumentReplaceOptions().returnNew(true); - final DocumentUpdateEntity replaceResult = db.collection(COLLECTION_NAME) - .replaceDocument(createResult.getKey(), doc, options); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - assertThat(replaceResult.getNew(), is(notNullValue())); - assertThat(replaceResult.getNew().getKey(), is(createResult.getKey())); - assertThat(replaceResult.getNew().getRevision(), is(not(createResult.getRev()))); - assertThat(replaceResult.getNew().getProperties().keySet(), not(hasItem("a"))); - assertThat(replaceResult.getNew().getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(replaceResult.getNew().getAttribute("b")), is("test")); - } - - @Test - public void replaceDocumentReturnOld() { - final 
BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final DocumentReplaceOptions options = new DocumentReplaceOptions().returnOld(true); - final DocumentUpdateEntity replaceResult = db.collection(COLLECTION_NAME) - .replaceDocument(createResult.getKey(), doc, options); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - assertThat(replaceResult.getOld(), is(notNullValue())); - assertThat(replaceResult.getOld().getKey(), is(createResult.getKey())); - assertThat(replaceResult.getOld().getRevision(), is(createResult.getRev())); - assertThat(replaceResult.getOld().getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(replaceResult.getOld().getAttribute("a")), is("test")); - assertThat(replaceResult.getOld().getProperties().keySet(), not(hasItem("b"))); - } - - @Test - public void deleteDocument() { - final BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - db.collection(COLLECTION_NAME).deleteDocument(createResult.getKey(), null, null); - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(document, is(nullValue())); - } - - @Test - public void deleteDocumentReturnOld() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - final DocumentDeleteOptions options = new DocumentDeleteOptions().returnOld(true); - final DocumentDeleteEntity deleteResult = db.collection(COLLECTION_NAME) - .deleteDocument(createResult.getKey(), 
BaseDocument.class, options); - assertThat(deleteResult.getOld(), is(notNullValue())); - assertThat(deleteResult.getOld(), instanceOf(BaseDocument.class)); - assertThat(deleteResult.getOld().getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(deleteResult.getOld().getAttribute("a")), is("test")); - } - - @Test - public void deleteDocumentIfMatch() { - final BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch(createResult.getRev()); - db.collection(COLLECTION_NAME).deleteDocument(createResult.getKey(), null, options); - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument(createResult.getKey(), - BaseDocument.class, null); - assertThat(document, is(nullValue())); - } - - @Test - public void deleteDocumentIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME).insertDocument(doc, - null); - final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch("no"); - try { - db.collection(COLLECTION_NAME).deleteDocument(createResult.getKey(), null, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void getIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final IndexEntity readResult = db.collection(COLLECTION_NAME).getIndex(createResult.getId()); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getType(), is(createResult.getType())); - } - - @Test - public void getIndexByKey() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final IndexEntity readResult = 
db.collection(COLLECTION_NAME).getIndex(createResult.getId().split("/")[1]); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getType(), is(createResult.getType())); - } - - @Test - public void deleteIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final String id = db.collection(COLLECTION_NAME).deleteIndex(createResult.getId()); - assertThat(id, is(createResult.getId())); - try { - db.getIndex(id); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void deleteIndexByKey() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final String id = db.collection(COLLECTION_NAME).deleteIndex(createResult.getId().split("/")[1]); - assertThat(id, is(createResult.getId())); - try { - db.getIndex(id); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void createHashIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - fields.add("b"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(nullValue())); - assertThat(indexResult.getFields(), hasItem("a")); - assertThat(indexResult.getFields(), hasItem("b")); - assertThat(indexResult.getGeoJson(), is(nullValue())); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getMinLength(), is(nullValue())); - if (arangoDB.getRole() == ServerRole.SINGLE) { - assertThat(indexResult.getSelectivityEstimate(), is(1)); - } - assertThat(indexResult.getSparse(), is(false)); - assertThat(indexResult.getType(), is(IndexType.hash)); - assertThat(indexResult.getUnique(), is(false)); - 
} - - @Test - public void createGeoIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensureGeoIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(false)); - assertThat(indexResult.getFields(), hasItem("a")); - assertThat(indexResult.getGeoJson(), is(false)); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getMinLength(), is(nullValue())); - assertThat(indexResult.getSparse(), is(true)); - assertThat(indexResult.getType(), is(IndexType.geo1)); - assertThat(indexResult.getUnique(), is(false)); - } - - @Test - public void createGeo2Index() { - final Collection fields = new ArrayList(); - fields.add("a"); - fields.add("b"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensureGeoIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(false)); - assertThat(indexResult.getFields(), hasItem("a")); - assertThat(indexResult.getFields(), hasItem("b")); - assertThat(indexResult.getGeoJson(), is(nullValue())); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getMinLength(), is(nullValue())); - assertThat(indexResult.getSparse(), is(true)); - assertThat(indexResult.getType(), is(IndexType.geo2)); - assertThat(indexResult.getUnique(), is(false)); - } - - @Test - public void createSkiplistIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - fields.add("b"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensureSkiplistIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(nullValue())); - assertThat(indexResult.getFields(), hasItem("a")); - 
assertThat(indexResult.getFields(), hasItem("b")); - assertThat(indexResult.getGeoJson(), is(nullValue())); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getMinLength(), is(nullValue())); - assertThat(indexResult.getSparse(), is(false)); - assertThat(indexResult.getType(), is(IndexType.skiplist)); - assertThat(indexResult.getUnique(), is(false)); - } - - @Test - public void createPersistentIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - fields.add("b"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensurePersistentIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(nullValue())); - assertThat(indexResult.getFields(), hasItem("a")); - assertThat(indexResult.getFields(), hasItem("b")); - assertThat(indexResult.getGeoJson(), is(nullValue())); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getMinLength(), is(nullValue())); - assertThat(indexResult.getSparse(), is(false)); - assertThat(indexResult.getType(), is(IndexType.persistent)); - assertThat(indexResult.getUnique(), is(false)); - } - - @Test - public void createFulltextIndex() { - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity indexResult = db.collection(COLLECTION_NAME).ensureFulltextIndex(fields, null); - assertThat(indexResult, is(notNullValue())); - assertThat(indexResult.getConstraint(), is(nullValue())); - assertThat(indexResult.getFields(), hasItem("a")); - assertThat(indexResult.getGeoJson(), is(nullValue())); - assertThat(indexResult.getId(), startsWith(COLLECTION_NAME)); - assertThat(indexResult.getIsNewlyCreated(), is(true)); - assertThat(indexResult.getSparse(), is(true)); - assertThat(indexResult.getType(), is(IndexType.fulltext)); - 
assertThat(indexResult.getUnique(), is(false)); - } - - @Test - public void getIndexes() { - final Collection fields = new ArrayList(); - fields.add("a"); - db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final Collection indexes = db.collection(COLLECTION_NAME).getIndexes(); - assertThat(indexes, is(notNullValue())); - assertThat(indexes.size(), is(2)); - for (final IndexEntity i : indexes) { - assertThat(i.getType(), anyOf(is(IndexType.primary), is(IndexType.hash))); - if (i.getType() == IndexType.hash) { - assertThat(i.getFields().size(), is(1)); - assertThat(i.getFields(), hasItem("a")); - } - } - } - - @Test - public void getEdgeIndex() { - try { - db.createCollection(EDGE_COLLECTION_NAME, new CollectionCreateOptions().type(CollectionType.EDGES)); - final Collection indexes = db.collection(EDGE_COLLECTION_NAME).getIndexes(); - assertThat(indexes, is(notNullValue())); - assertThat(indexes.size(), is(2)); - for (final IndexEntity i : indexes) { - assertThat(i.getType(), anyOf(is(IndexType.primary), is(IndexType.edge))); - } - } finally { - try { - db.collection(EDGE_COLLECTION_NAME).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void exists() { - assertThat(db.collection(COLLECTION_NAME).exists(), is(true)); - assertThat(db.collection(COLLECTION_NAME + "no").exists(), is(false)); - } - - @Test - public void truncate() { - final BaseDocument doc = new BaseDocument(); - db.collection(COLLECTION_NAME).insertDocument(doc, null); - final BaseDocument readResult = db.collection(COLLECTION_NAME).getDocument(doc.getKey(), BaseDocument.class, - null); - assertThat(readResult.getKey(), is(doc.getKey())); - final CollectionEntity truncateResult = db.collection(COLLECTION_NAME).truncate(); - assertThat(truncateResult, is(notNullValue())); - assertThat(truncateResult.getId(), is(notNullValue())); - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument(doc.getKey(), BaseDocument.class, - null); - 
assertThat(document, is(nullValue())); - } - - @Test - public void getCount() { - final CollectionPropertiesEntity countEmpty = db.collection(COLLECTION_NAME).count(); - assertThat(countEmpty, is(notNullValue())); - assertThat(countEmpty.getCount(), is(0L)); - db.collection(COLLECTION_NAME).insertDocument("{}", null); - final CollectionPropertiesEntity count = db.collection(COLLECTION_NAME).count(); - assertThat(count.getCount(), is(1L)); - } - - @Test - public void documentExists() { - final Boolean existsNot = db.collection(COLLECTION_NAME).documentExists("no", null); - assertThat(existsNot, is(false)); - db.collection(COLLECTION_NAME).insertDocument("{\"_key\":\"abc\"}", null); - final Boolean exists = db.collection(COLLECTION_NAME).documentExists("abc", null); - assertThat(exists, is(true)); - } - - @Test(expected = ArangoDBException.class) - public void documentExistsThrowExcpetion() { - db.collection(COLLECTION_NAME).documentExists("no", new DocumentExistsOptions().catchException(false)); - } - - @Test - public void documentExistsIfMatch() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument("{\"_key\":\"abc\"}", null); - final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch(createResult.getRev()); - final Boolean exists = db.collection(COLLECTION_NAME).documentExists("abc", options); - assertThat(exists, is(true)); - } - - @Test - public void documentExistsIfMatchFail() { - db.collection(COLLECTION_NAME).insertDocument("{\"_key\":\"abc\"}", null); - final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch("no"); - final Boolean exists = db.collection(COLLECTION_NAME).documentExists("abc", options); - assertThat(exists, is(false)); - } - - @Test - public void documentExistsIfNoneMatch() { - db.collection(COLLECTION_NAME).insertDocument("{\"_key\":\"abc\"}", null); - final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch("no"); - final Boolean exists = 
db.collection(COLLECTION_NAME).documentExists("abc", options); - assertThat(exists, is(true)); - } - - @Test - public void documentExistsIfNoneMatchFail() { - final DocumentCreateEntity createResult = db.collection(COLLECTION_NAME) - .insertDocument("{\"_key\":\"abc\"}", null); - final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch(createResult.getRev()); - final Boolean exists = db.collection(COLLECTION_NAME).documentExists("abc", options); - assertThat(exists, is(false)); - } - - @Test - public void insertDocuments() { - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values, null); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(3)); - assertThat(docs.getErrors(), is(notNullValue())); - assertThat(docs.getErrors().size(), is(0)); - } - - @Test - public void insertDocumentsJson() { - final Collection values = new ArrayList(); - values.add("{}"); - values.add("{}"); - values.add("{}"); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(3)); - assertThat(docs.getErrors(), is(notNullValue())); - assertThat(docs.getErrors().size(), is(0)); - } - - @Test - public void insertDocumentsOne() { - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values, null); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(1)); - assertThat(docs.getErrors(), is(notNullValue())); - 
assertThat(docs.getErrors().size(), is(0)); - } - - @Test - public void insertDocumentsEmpty() { - final Collection values = new ArrayList(); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values, null); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(0)); - assertThat(docs.getErrors(), is(notNullValue())); - assertThat(docs.getErrors().size(), is(0)); - } - - @Test - public void insertDocumentsReturnNew() { - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values, options); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(3)); - assertThat(docs.getErrors(), is(notNullValue())); - assertThat(docs.getErrors().size(), is(0)); - for (final DocumentCreateEntity doc : docs.getDocuments()) { - assertThat(doc.getNew(), is(notNullValue())); - final BaseDocument baseDocument = doc.getNew(); - assertThat(baseDocument.getKey(), is(notNullValue())); - } - - } - - @Test - public void insertDocumentsFail() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final MultiDocumentEntity> docs = db.collection(COLLECTION_NAME) - .insertDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getDocuments(), is(notNullValue())); - assertThat(docs.getDocuments().size(), is(2)); - assertThat(docs.getErrors(), is(notNullValue())); - assertThat(docs.getErrors().size(), is(1)); - assertThat(docs.getErrors().iterator().next().getErrorNum(), is(1210)); - } - - @Test - public void 
importDocuments() { - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(values.size())); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonList() { - final Collection values = new ArrayList(); - values.add("{}"); - values.add("{}"); - values.add("{}"); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(values.size())); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsDuplicateDefaultError() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsDuplicateError() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = 
db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.error)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsDuplicateIgnore() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.ignore)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(1)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsDuplicateReplace() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.replace)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(1)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsDuplicateUpdate() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = 
db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.update)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(1)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsCompleteFail() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - try { - db.collection(COLLECTION_NAME).importDocuments(values, new DocumentImportOptions().complete(true)); - fail(); - } catch (final ArangoDBException e) { - assertThat(e.getMessage(), containsString("1210")); - } - } - - @Test - public void importDocumentsDetails() { - final Collection values = new ArrayList(); - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - values.add(new BaseDocument("2")); - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().details(true)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails().size(), is(1)); - assertThat(docs.getDetails().iterator().next(), containsString("unique constraint violated")); - } - - @Test - public void importDocumentsOverwriteFalse() { - final ArangoCollection collection = db.collection(COLLECTION_NAME); - collection.insertDocument(new BaseDocument()); - assertThat(collection.count().getCount(), is(1L)); - - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - collection.importDocuments(values, new DocumentImportOptions().overwrite(false)); - 
assertThat(collection.count().getCount(), is(3L)); - } - - @Test - public void importDocumentsOverwriteTrue() { - final ArangoCollection collection = db.collection(COLLECTION_NAME); - collection.insertDocument(new BaseDocument()); - assertThat(collection.count().getCount(), is(1L)); - - final Collection values = new ArrayList(); - values.add(new BaseDocument()); - values.add(new BaseDocument()); - collection.importDocuments(values, new DocumentImportOptions().overwrite(true)); - assertThat(collection.count().getCount(), is(2L)); - } - - @Test - public void importDocumentsFromToPrefix() { - db.createCollection(COLLECTION_NAME + "_edge", new CollectionCreateOptions().type(CollectionType.EDGES)); - final ArangoCollection collection = db.collection(COLLECTION_NAME + "_edge"); - try { - final Collection values = new ArrayList(); - final String[] keys = { "1", "2" }; - for (int i = 0; i < keys.length; i++) { - values.add(new BaseEdgeDocument(keys[i], "from", "to")); - } - assertThat(values.size(), is(keys.length)); - - final DocumentImportEntity importResult = collection.importDocuments(values, - new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); - assertThat(importResult, is(notNullValue())); - assertThat(importResult.getCreated(), is(values.size())); - for (int i = 0; i < keys.length; i++) { - final BaseEdgeDocument doc = collection.getDocument(keys[i], BaseEdgeDocument.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getFrom(), is("foo/from")); - assertThat(doc.getTo(), is("bar/to")); - } - } finally { - collection.drop(); - } - } - - @Test - public void importDocumentsJson() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - 
assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonDuplicateDefaultError() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonDuplicateError() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.error)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonDuplicateIgnore() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.ignore)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(1)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonDuplicateReplace() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = 
db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.replace)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(1)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonDuplicateUpdate() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().onDuplicate(OnDuplicate.update)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(0)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(1)); - assertThat(docs.getDetails(), is(empty())); - } - - @Test - public void importDocumentsJsonCompleteFail() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - try { - db.collection(COLLECTION_NAME).importDocuments(values, new DocumentImportOptions().complete(true)); - fail(); - } catch (final ArangoDBException e) { - assertThat(e.getMessage(), containsString("1210")); - } - } - - @Test - public void importDocumentsJsonDetails() { - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; - final DocumentImportEntity docs = db.collection(COLLECTION_NAME).importDocuments(values, - new DocumentImportOptions().details(true)); - assertThat(docs, is(notNullValue())); - assertThat(docs.getCreated(), is(2)); - assertThat(docs.getEmpty(), is(0)); - assertThat(docs.getErrors(), is(1)); - assertThat(docs.getIgnored(), is(0)); - assertThat(docs.getUpdated(), is(0)); - assertThat(docs.getDetails().size(), is(1)); - assertThat(docs.getDetails().iterator().next(), containsString("unique constraint 
violated")); - } - - @Test - public void importDocumentsJsonOverwriteFalse() { - final ArangoCollection collection = db.collection(COLLECTION_NAME); - collection.insertDocument(new BaseDocument()); - assertThat(collection.count().getCount(), is(1L)); - - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"}]"; - collection.importDocuments(values, new DocumentImportOptions().overwrite(false)); - assertThat(collection.count().getCount(), is(3L)); - } - - @Test - public void importDocumentsJsonOverwriteTrue() { - final ArangoCollection collection = db.collection(COLLECTION_NAME); - collection.insertDocument(new BaseDocument()); - assertThat(collection.count().getCount(), is(1L)); - - final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"}]"; - collection.importDocuments(values, new DocumentImportOptions().overwrite(true)); - assertThat(collection.count().getCount(), is(2L)); - } - - @Test - public void importDocumentsJsonFromToPrefix() { - db.createCollection(COLLECTION_NAME + "_edge", new CollectionCreateOptions().type(CollectionType.EDGES)); - final ArangoCollection collection = db.collection(COLLECTION_NAME + "_edge"); - try { - final String[] keys = { "1", "2" }; - final String values = "[{\"_key\":\"1\",\"_from\":\"from\",\"_to\":\"to\"},{\"_key\":\"2\",\"_from\":\"from\",\"_to\":\"to\"}]"; - - final DocumentImportEntity importResult = collection.importDocuments(values, - new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); - assertThat(importResult, is(notNullValue())); - assertThat(importResult.getCreated(), is(2)); - for (int i = 0; i < keys.length; i++) { - final BaseEdgeDocument doc = collection.getDocument(keys[i], BaseEdgeDocument.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getFrom(), is("foo/from")); - assertThat(doc.getTo(), is("bar/to")); - } - } finally { - collection.drop(); - } - } - - @Test - public void deleteDocumentsByKey() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new 
BaseDocument(); - e.setKey("1"); - values.add(e); - } - { - final BaseDocument e = new BaseDocument(); - e.setKey("2"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection keys = new ArrayList(); - keys.add("1"); - keys.add("2"); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(keys, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(2)); - for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { - assertThat(i.getKey(), anyOf(is("1"), is("2"))); - } - assertThat(deleteResult.getErrors().size(), is(0)); - } - - @Test - public void deleteDocumentsByDocuments() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - { - final BaseDocument e = new BaseDocument(); - e.setKey("2"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(values, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(2)); - for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { - assertThat(i.getKey(), anyOf(is("1"), is("2"))); - } - assertThat(deleteResult.getErrors().size(), is(0)); - } - - @Test - public void deleteDocumentsByKeyOne() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection keys = new ArrayList(); - keys.add("1"); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(keys, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(1)); - for (final DocumentDeleteEntity i : 
deleteResult.getDocuments()) { - assertThat(i.getKey(), is("1")); - } - assertThat(deleteResult.getErrors().size(), is(0)); - } - - @Test - public void deleteDocumentsByDocumentOne() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(values, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(1)); - for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { - assertThat(i.getKey(), is("1")); - } - assertThat(deleteResult.getErrors().size(), is(0)); - } - - @Test - public void deleteDocumentsEmpty() { - final Collection values = new ArrayList(); - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection keys = new ArrayList(); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(keys, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(0)); - assertThat(deleteResult.getErrors().size(), is(0)); - } - - @Test - public void deleteDocumentsByKeyNotExisting() { - final Collection values = new ArrayList(); - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection keys = new ArrayList(); - keys.add("1"); - keys.add("2"); - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(keys, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(0)); - assertThat(deleteResult.getErrors().size(), is(2)); - } - - @Test - public void deleteDocumentsByDocumentsNotExisting() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - { - final BaseDocument e = new 
BaseDocument(); - e.setKey("2"); - values.add(e); - } - final MultiDocumentEntity> deleteResult = db.collection(COLLECTION_NAME) - .deleteDocuments(values, null, null); - assertThat(deleteResult, is(notNullValue())); - assertThat(deleteResult.getDocuments().size(), is(0)); - assertThat(deleteResult.getErrors().size(), is(2)); - } - - @Test - public void updateDocuments() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - { - final BaseDocument e = new BaseDocument(); - e.setKey("2"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - for (final BaseDocument i : values) { - i.addAttribute("a", "test"); - updatedValues.add(i); - } - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(2)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void updateDocumentsOne() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new BaseDocument(); - e.setKey("1"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - final BaseDocument first = values.iterator().next(); - first.addAttribute("a", "test"); - updatedValues.add(first); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(1)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void updateDocumentsEmpty() { - final Collection values = new ArrayList(); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(values, null); - assertThat(updateResult.getDocuments().size(), is(0)); - assertThat(updateResult.getErrors().size(), is(0)); 
- } - - @Test - public void updateDocumentsWithoutKey() { - final Collection values = new ArrayList(); - { - values.add(new BaseDocument("1")); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - for (final BaseDocument i : values) { - i.addAttribute("a", "test"); - updatedValues.add(i); - } - updatedValues.add(new BaseDocument()); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(1)); - assertThat(updateResult.getErrors().size(), is(1)); - } - - @Test - public void updateDocumentsJson() { - final Collection values = new ArrayList(); - values.add("{\"_key\":\"1\"}"); - values.add("{\"_key\":\"2\"}"); - db.collection(COLLECTION_NAME).insertDocuments(values); - - final Collection updatedValues = new ArrayList(); - updatedValues.add("{\"_key\":\"1\", \"foo\":\"bar\"}"); - updatedValues.add("{\"_key\":\"2\", \"foo\":\"bar\"}"); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues); - assertThat(updateResult.getDocuments().size(), is(2)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void replaceDocuments() { - final Collection values = new ArrayList(); - { - values.add(new BaseDocument("1")); - values.add(new BaseDocument("2")); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - for (final BaseDocument i : values) { - i.addAttribute("a", "test"); - updatedValues.add(i); - } - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .replaceDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(2)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void replaceDocumentsOne() { - final Collection values = new ArrayList(); - { - final BaseDocument e = new 
BaseDocument(); - e.setKey("1"); - values.add(e); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - final BaseDocument first = values.iterator().next(); - first.addAttribute("a", "test"); - updatedValues.add(first); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(1)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void replaceDocumentsEmpty() { - final Collection values = new ArrayList(); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(values, null); - assertThat(updateResult.getDocuments().size(), is(0)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void replaceDocumentsWithoutKey() { - final Collection values = new ArrayList(); - { - values.add(new BaseDocument("1")); - } - db.collection(COLLECTION_NAME).insertDocuments(values, null); - final Collection updatedValues = new ArrayList(); - for (final BaseDocument i : values) { - i.addAttribute("a", "test"); - updatedValues.add(i); - } - updatedValues.add(new BaseDocument()); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .updateDocuments(updatedValues, null); - assertThat(updateResult.getDocuments().size(), is(1)); - assertThat(updateResult.getErrors().size(), is(1)); - } - - @Test - public void replaceDocumentsJson() { - final Collection values = new ArrayList(); - values.add("{\"_key\":\"1\"}"); - values.add("{\"_key\":\"2\"}"); - db.collection(COLLECTION_NAME).insertDocuments(values); - - final Collection updatedValues = new ArrayList(); - updatedValues.add("{\"_key\":\"1\", \"foo\":\"bar\"}"); - updatedValues.add("{\"_key\":\"2\", \"foo\":\"bar\"}"); - final MultiDocumentEntity> updateResult = db.collection(COLLECTION_NAME) - .replaceDocuments(updatedValues); - 
assertThat(updateResult.getDocuments().size(), is(2)); - assertThat(updateResult.getErrors().size(), is(0)); - } - - @Test - public void load() { - final CollectionEntity result = db.collection(COLLECTION_NAME).load(); - assertThat(result.getName(), is(COLLECTION_NAME)); - } - - @Test - public void unload() { - final CollectionEntity result = db.collection(COLLECTION_NAME).unload(); - assertThat(result.getName(), is(COLLECTION_NAME)); - } - - @Test - public void getInfo() { - final CollectionEntity result = db.collection(COLLECTION_NAME).getInfo(); - assertThat(result.getName(), is(COLLECTION_NAME)); - } - - @Test - public void getPropeties() { - final CollectionPropertiesEntity result = db.collection(COLLECTION_NAME).getProperties(); - assertThat(result.getName(), is(COLLECTION_NAME)); - assertThat(result.getCount(), is(nullValue())); - } - - @Test - public void changeProperties() { - final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); - assertThat(properties.getWaitForSync(), is(notNullValue())); - final CollectionPropertiesOptions options = new CollectionPropertiesOptions(); - options.waitForSync(!properties.getWaitForSync()); - final CollectionPropertiesEntity changedProperties = db.collection(COLLECTION_NAME).changeProperties(options); - assertThat(changedProperties.getWaitForSync(), is(notNullValue())); - assertThat(changedProperties.getWaitForSync(), is(not(properties.getWaitForSync()))); - } - - @Test - public void rename() { - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.collection(COLLECTION_NAME).rename(COLLECTION_NAME + "1"); - assertThat(result, is(notNullValue())); - assertThat(result.getName(), is(COLLECTION_NAME + "1")); - final CollectionEntity info = db.collection(COLLECTION_NAME + "1").getInfo(); - assertThat(info.getName(), is(COLLECTION_NAME + "1")); - try { - db.collection(COLLECTION_NAME).getInfo(); - fail(); - } catch (final 
ArangoDBException e) { - } - } finally { - db.collection(COLLECTION_NAME + "1").rename(COLLECTION_NAME); - } - } - - @Test - public void getRevision() { - final CollectionRevisionEntity result = db.collection(COLLECTION_NAME).getRevision(); - assertThat(result, is(notNullValue())); - assertThat(result.getName(), is(COLLECTION_NAME)); - assertThat(result.getRevision(), is(notNullValue())); - } - - @Test - public void keyWithSpecialCharacter() { - final String key = "myKey_-:.@()+,=;$!*'%"; - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key)); - final BaseDocument doc = db.collection(COLLECTION_NAME).getDocument(key, BaseDocument.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getKey(), is(key)); - } - - @Test - public void alreadyUrlEncodedkey() { - final String key = "http%3A%2F%2Fexample.com%2F"; - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key)); - final BaseDocument doc = db.collection(COLLECTION_NAME).getDocument(key, BaseDocument.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getKey(), is(key)); - } - - @Test - public void grantAccessRW() { - try { - arangoDB.createUser("user1", "1234", null); - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.RW); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessRO() { - try { - arangoDB.createUser("user1", "1234", null); - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.RO); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessNONE() { - try { - arangoDB.createUser("user1", "1234", null); - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.NONE); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void grantAccessUserNotFound() { - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.RW); - } - - @Test - public void revokeAccess() { - try { - 
arangoDB.createUser("user1", "1234", null); - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.NONE); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void revokeAccessUserNotFound() { - db.collection(COLLECTION_NAME).grantAccess("user1", Permissions.NONE); - } - - @Test - public void resetAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.collection(COLLECTION_NAME).resetAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void resetAccessUserNotFound() { - db.collection(COLLECTION_NAME).resetAccess("user1"); - } - - @Test - public void getPermissions() { - assertThat(Permissions.RW, is(db.collection(COLLECTION_NAME).getPermissions("root"))); - } - -} diff --git a/src/test/java/com/arangodb/ArangoDBTest.java b/src/test/java/com/arangodb/ArangoDBTest.java deleted file mode 100644 index 096acfbe1..000000000 --- a/src/test/java/com/arangodb/ArangoDBTest.java +++ /dev/null @@ -1,456 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.LogEntity; -import com.arangodb.entity.LogLevel; -import com.arangodb.entity.LogLevelEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.UserEntity; -import com.arangodb.model.LogOptions; -import com.arangodb.model.LogOptions.SortOrder; -import com.arangodb.model.UserCreateOptions; -import com.arangodb.model.UserUpdateOptions; -import com.arangodb.velocypack.exception.VPackException; -import com.arangodb.velocystream.Request; -import com.arangodb.velocystream.RequestType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoDBTest { - - @Parameters - public static Collection builders() { - return Arrays.asList(new ArangoDB.Builder().useProtocol(Protocol.VST), - new ArangoDB.Builder().useProtocol(Protocol.HTTP_JSON), - new ArangoDB.Builder().useProtocol(Protocol.HTTP_VPACK)); - } - - private static final String ROOT = "root"; - private static final String USER = "mit dem mund"; - private static final String PW = 
"machts der hund"; - private final ArangoDB arangoDB; - - public ArangoDBTest(final Builder builder) { - super(); - arangoDB = builder.build(); - } - - @Test - public void getVersion() { - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - assertThat(version.getServer(), is(notNullValue())); - assertThat(version.getVersion(), is(notNullValue())); - } - - @Test - public void createDatabase() { - try { - final Boolean result = arangoDB.createDatabase(BaseTest.TEST_DB); - assertThat(result, is(true)); - } finally { - try { - arangoDB.db(BaseTest.TEST_DB).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void deleteDatabase() { - final Boolean resultCreate = arangoDB.createDatabase(BaseTest.TEST_DB); - assertThat(resultCreate, is(true)); - final Boolean resultDelete = arangoDB.db(BaseTest.TEST_DB).drop(); - assertThat(resultDelete, is(true)); - } - - @Test - public void getDatabases() { - try { - Collection dbs = arangoDB.getDatabases(); - assertThat(dbs, is(notNullValue())); - assertThat(dbs.size(), is(greaterThan(0))); - final int dbCount = dbs.size(); - assertThat(dbs.iterator().next(), is("_system")); - arangoDB.createDatabase(BaseTest.TEST_DB); - dbs = arangoDB.getDatabases(); - assertThat(dbs.size(), is(greaterThan(dbCount))); - assertThat(dbs, hasItem("_system")); - assertThat(dbs, hasItem(BaseTest.TEST_DB)); - } finally { - arangoDB.db(BaseTest.TEST_DB).drop(); - } - } - - @Test - public void getAccessibleDatabases() { - final Collection dbs = arangoDB.getAccessibleDatabases(); - assertThat(dbs, is(notNullValue())); - assertThat(dbs.size(), greaterThan(0)); - assertThat(dbs, hasItem("_system")); - } - - @Test - public void getAccessibleDatabasesFor() { - final Collection dbs = arangoDB.getAccessibleDatabasesFor("root"); - assertThat(dbs, is(notNullValue())); - assertThat(dbs.size(), greaterThan(0)); - assertThat(dbs, hasItem("_system")); - } - - @Test - public void createUser() { - 
try { - final UserEntity result = arangoDB.createUser(USER, PW, null); - assertThat(result, is(notNullValue())); - assertThat(result.getUser(), is(USER)); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void deleteUser() { - arangoDB.createUser(USER, PW, null); - arangoDB.deleteUser(USER); - } - - @Test - public void getUserRoot() { - final UserEntity user = arangoDB.getUser(ROOT); - assertThat(user, is(notNullValue())); - assertThat(user.getUser(), is(ROOT)); - } - - @Test - public void getUser() { - try { - arangoDB.createUser(USER, PW, null); - final UserEntity user = arangoDB.getUser(USER); - assertThat(user.getUser(), is(USER)); - } finally { - arangoDB.deleteUser(USER); - } - - } - - @Test - public void getUsersOnlyRoot() { - final Collection users = arangoDB.getUsers(); - assertThat(users, is(notNullValue())); - assertThat(users.size(), greaterThan(0)); - } - - @Test - public void getUsers() { - try { - arangoDB.createUser(USER, PW, null); - final Collection users = arangoDB.getUsers(); - assertThat(users, is(notNullValue())); - assertThat(users.size(), is(2)); - for (final UserEntity user : users) { - assertThat(user.getUser(), anyOf(is(ROOT), is(USER))); - } - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void updateUserNoOptions() { - try { - arangoDB.createUser(USER, PW, null); - arangoDB.updateUser(USER, null); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void updateUser() { - try { - final Map extra = new HashMap(); - extra.put("hund", false); - arangoDB.createUser(USER, PW, new UserCreateOptions().extra(extra)); - extra.put("hund", true); - extra.put("mund", true); - final UserEntity user = arangoDB.updateUser(USER, new UserUpdateOptions().extra(extra)); - assertThat(user, is(notNullValue())); - assertThat(user.getExtra().size(), is(2)); - assertThat(user.getExtra().get("hund"), is(notNullValue())); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund"))), is(true)); 
- final UserEntity user2 = arangoDB.getUser(USER); - assertThat(user2.getExtra().size(), is(2)); - assertThat(user2.getExtra().get("hund"), is(notNullValue())); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund"))), is(true)); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void replaceUser() { - try { - final Map extra = new HashMap(); - extra.put("hund", false); - arangoDB.createUser(USER, PW, new UserCreateOptions().extra(extra)); - extra.remove("hund"); - extra.put("mund", true); - final UserEntity user = arangoDB.replaceUser(USER, new UserUpdateOptions().extra(extra)); - assertThat(user, is(notNullValue())); - assertThat(user.getExtra().size(), is(1)); - assertThat(user.getExtra().get("mund"), is(notNullValue())); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund"))), is(true)); - final UserEntity user2 = arangoDB.getUser(USER); - assertThat(user2.getExtra().size(), is(1)); - assertThat(user2.getExtra().get("mund"), is(notNullValue())); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund"))), is(true)); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void updateUserDefaultDatabaseAccess() { - try { - arangoDB.createUser(USER, PW); - arangoDB.grantDefaultDatabaseAccess(USER, Permissions.RW); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void updateUserDefaultCollectionAccess() { - try { - arangoDB.createUser(USER, PW); - arangoDB.grantDefaultCollectionAccess(USER, Permissions.RW); - } finally { - arangoDB.deleteUser(USER); - } - } - - @Test - public void authenticationFailPassword() { - final ArangoDB arangoDB = new ArangoDB.Builder().password("no").build(); - try { - arangoDB.getVersion(); - fail(); - } catch (final ArangoDBException e) { - - } - } - - @Test - public void authenticationFailUser() { - final ArangoDB arangoDB = new ArangoDB.Builder().user("no").build(); - try { - arangoDB.getVersion(); - fail(); - } catch (final 
ArangoDBException e) { - - } - } - - @Test - public void execute() throws VPackException { - final Response response = arangoDB.execute(new Request("_system", RequestType.GET, "/_api/version")); - assertThat(response.getBody(), is(notNullValue())); - assertThat(response.getBody().get("version").isString(), is(true)); - } - - @Test - public void getLogs() { - final LogEntity logs = arangoDB.getLogs(null); - assertThat(logs, is(notNullValue())); - assertThat(logs.getTotalAmount(), greaterThan(0L)); - assertThat((long) logs.getLid().size(), is(logs.getTotalAmount())); - assertThat((long) logs.getLevel().size(), is(logs.getTotalAmount())); - assertThat((long) logs.getTimestamp().size(), is(logs.getTotalAmount())); - assertThat((long) logs.getText().size(), is(logs.getTotalAmount())); - } - - @Test - public void getLogsUpto() { - final LogEntity logsUpto = arangoDB.getLogs(new LogOptions().upto(LogLevel.WARNING)); - assertThat(logsUpto, is(notNullValue())); - assertThat(logsUpto.getLevel(), not(contains(LogLevel.INFO))); - } - - @Test - public void getLogsLevel() { - final LogEntity logsInfo = arangoDB.getLogs(new LogOptions().level(LogLevel.INFO)); - assertThat(logsInfo, is(notNullValue())); - assertThat(logsInfo.getLevel(), everyItem(is(LogLevel.INFO))); - } - - @Test - public void getLogsStart() { - final LogEntity logs = arangoDB.getLogs(null); - assertThat(logs.getLid(), not(empty())); - final LogEntity logsStart = arangoDB.getLogs(new LogOptions().start(logs.getLid().get(0) + 1)); - assertThat(logsStart, is(notNullValue())); - assertThat(logsStart.getLid(), not(contains(logs.getLid().get(0)))); - } - - @Test - public void getLogsSize() { - final LogEntity logs = arangoDB.getLogs(null); - assertThat(logs.getLid().size(), greaterThan(0)); - final LogEntity logsSize = arangoDB.getLogs(new LogOptions().size(logs.getLid().size() - 1)); - assertThat(logsSize, is(notNullValue())); - assertThat(logsSize.getLid().size(), is(logs.getLid().size() - 1)); - } - - @Test - 
public void getLogsOffset() { - final LogEntity logs = arangoDB.getLogs(null); - assertThat(logs.getTotalAmount(), greaterThan(0L)); - final LogEntity logsOffset = arangoDB.getLogs(new LogOptions().offset(1)); - assertThat(logsOffset, is(notNullValue())); - assertThat(logsOffset.getLid(), not(hasItem(logs.getLid().get(0)))); - } - - @Test - public void getLogsSearch() { - final LogEntity logs = arangoDB.getLogs(null); - final LogEntity logsSearch = arangoDB.getLogs(new LogOptions().search(BaseTest.TEST_DB)); - assertThat(logsSearch, is(notNullValue())); - assertThat(logs.getTotalAmount(), greaterThan(logsSearch.getTotalAmount())); - } - - @Test - public void getLogsSortAsc() { - final LogEntity logs = arangoDB.getLogs(new LogOptions().sort(SortOrder.asc)); - assertThat(logs, is(notNullValue())); - long lastId = -1; - for (final Long id : logs.getLid()) { - assertThat(id, greaterThan(lastId)); - lastId = id; - } - } - - @Test - public void getLogsSortDesc() { - final LogEntity logs = arangoDB.getLogs(new LogOptions().sort(SortOrder.desc)); - assertThat(logs, is(notNullValue())); - long lastId = Long.MAX_VALUE; - for (final Long id : logs.getLid()) { - assertThat(lastId, greaterThan(id)); - lastId = id; - } - } - - @Test - public void getLogLevel() { - final LogLevelEntity logLevel = arangoDB.getLogLevel(); - assertThat(logLevel, is(notNullValue())); - assertThat(logLevel.getAgency(), is(LogLevelEntity.LogLevel.INFO)); - } - - @Test - public void setLogLevel() { - final LogLevelEntity entity = new LogLevelEntity(); - try { - entity.setAgency(LogLevelEntity.LogLevel.ERROR); - final LogLevelEntity logLevel = arangoDB.setLogLevel(entity); - assertThat(logLevel, is(notNullValue())); - assertThat(logLevel.getAgency(), is(LogLevelEntity.LogLevel.ERROR)); - } finally { - entity.setAgency(LogLevelEntity.LogLevel.INFO); - arangoDB.setLogLevel(entity); - } - } - - @Test - public void arangoDBException() { - try { - arangoDB.db("no").getInfo(); - fail(); - } catch (final 
ArangoDBException e) { - assertThat(e.getResponseCode(), is(404)); - assertThat(e.getErrorNum(), is(1228)); - assertThat(e.getErrorMessage(), is("database not found")); - } - } - - @Test - public void fallbackHost() { - final ArangoDB arangoDB = new ArangoDB.Builder().host("not-accessible", 8529).host("127.0.0.1", 8529).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - } - - @Test(expected = ArangoDBException.class) - public void loadproperties() { - new ArangoDB.Builder().loadProperties(ArangoDBTest.class.getResourceAsStream("/arangodb-bad.properties")); - } - - @Test(expected = ArangoDBException.class) - public void loadproperties2() { - new ArangoDB.Builder().loadProperties(ArangoDBTest.class.getResourceAsStream("/arangodb-bad2.properties")); - } - - @Test - public void accessMultipleDatabases() { - try { - arangoDB.createDatabase("db1"); - arangoDB.createDatabase("db2"); - - final ArangoDBVersion version1 = arangoDB.db("db1").getVersion(); - assertThat(version1, is(notNullValue())); - final ArangoDBVersion version2 = arangoDB.db("db2").getVersion(); - assertThat(version2, is(notNullValue())); - } finally { - arangoDB.db("db1").drop(); - arangoDB.db("db2").drop(); - } - } -} diff --git a/src/test/java/com/arangodb/ArangoDatabaseTest.java b/src/test/java/com/arangodb/ArangoDatabaseTest.java deleted file mode 100644 index b22f6ce0f..000000000 --- a/src/test/java/com/arangodb/ArangoDatabaseTest.java +++ /dev/null @@ -1,1204 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicInteger; - -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.AqlExecutionExplainEntity; -import com.arangodb.entity.AqlExecutionExplainEntity.ExecutionPlan; -import com.arangodb.entity.AqlFunctionEntity; -import com.arangodb.entity.AqlParseEntity; -import com.arangodb.entity.AqlParseEntity.AstNode; -import com.arangodb.entity.ArangoDBVersion; -import com.arangodb.entity.ArangoDBVersion.License; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.CollectionPropertiesEntity; -import com.arangodb.entity.CollectionType; -import 
com.arangodb.entity.DatabaseEntity; -import com.arangodb.entity.GraphEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.PathEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.entity.QueryCachePropertiesEntity; -import com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; -import com.arangodb.entity.QueryEntity; -import com.arangodb.entity.QueryExecutionState; -import com.arangodb.entity.QueryTrackingPropertiesEntity; -import com.arangodb.entity.ServerRole; -import com.arangodb.entity.TraversalEntity; -import com.arangodb.model.AqlFunctionDeleteOptions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.CollectionsReadOptions; -import com.arangodb.model.TransactionOptions; -import com.arangodb.model.TraversalOptions; -import com.arangodb.model.TraversalOptions.Direction; -import com.arangodb.util.MapBuilder; -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoDatabaseTest extends BaseTest { - - private static final String COLLECTION_NAME = "db_test"; - private static final String GRAPH_NAME = "graph_test"; - - public ArangoDatabaseTest(final Builder builder) { - super(builder); - } - - @Test - public void getVersion() { - final ArangoDBVersion version = db.getVersion(); - assertThat(version, is(notNullValue())); - assertThat(version.getServer(), is(notNullValue())); - assertThat(version.getVersion(), is(notNullValue())); - } - - @Test - public void exists() { - assertThat(db.exists(), is(true)); - assertThat(arangoDB.db("no").exists(), is(false)); - } - - @Test - public void getAccessibleDatabases() { - final Collection dbs = db.getAccessibleDatabases(); - assertThat(dbs, is(notNullValue())); - assertThat(dbs.size(), 
greaterThan(0)); - assertThat(dbs, hasItem("_system")); - } - - @Test - public void createCollection() { - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, null); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithReplicationFactor() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().replicationFactor(2)); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getReplicationFactor(), is(2)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithNumberOfShards() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().numberOfShards(2)); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(2)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithNumberOfShardsAndShardKey() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().numberOfShards(2).shardKeys("a")); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); - assertThat(properties.getNumberOfShards(), is(2)); - assertThat(properties.getShardKeys().size(), is(1)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } 
- - @Test - public void createCollectionWithNumberOfShardsAndShardKeys() { - if (arangoDB.getRole() == ServerRole.SINGLE) { - return; - } - try { - final CollectionEntity result = db.createCollection(COLLECTION_NAME, - new CollectionCreateOptions().numberOfShards(2).shardKeys("a", "b")); - assertThat(result, is(notNullValue())); - assertThat(result.getId(), is(notNullValue())); - final CollectionPropertiesEntity properties = db.collection(COLLECTION_NAME).getProperties(); - assertThat(properties.getNumberOfShards(), is(2)); - assertThat(properties.getShardKeys().size(), is(2)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void createCollectionWithDistributeShardsLike() { - if (arangoDB.getVersion().getLicense() == License.ENTERPRISE && arangoDB.getRole() != ServerRole.SINGLE) { - final Integer numberOfShards = 3; - db.createCollection(COLLECTION_NAME, new CollectionCreateOptions().numberOfShards(numberOfShards)); - db.createCollection(COLLECTION_NAME + "2", - new CollectionCreateOptions().distributeShardsLike(COLLECTION_NAME)); - assertThat(db.collection(COLLECTION_NAME).getProperties().getNumberOfShards(), is(numberOfShards)); - assertThat(db.collection(COLLECTION_NAME + "2").getProperties().getNumberOfShards(), is(numberOfShards)); - } - } - - @Test - public void deleteCollection() { - db.createCollection(COLLECTION_NAME, null); - db.collection(COLLECTION_NAME).drop(); - try { - db.collection(COLLECTION_NAME).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void deleteSystemCollection() { - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - final String name = "_system_test"; - db.createCollection(name, new CollectionCreateOptions().isSystem(true)); - db.collection(name).drop(true); - try { - db.collection(name).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void deleteSystemCollectionFail() { - if (arangoDB.getRole() != 
ServerRole.SINGLE) { - return; - } - final String name = "_system_test"; - db.createCollection(name, new CollectionCreateOptions().isSystem(true)); - try { - db.collection(name).drop(); - fail(); - } catch (final ArangoDBException e) { - } - db.collection(name).drop(true); - try { - db.collection(name).getInfo(); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void getIndex() { - try { - db.createCollection(COLLECTION_NAME, null); - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final IndexEntity readResult = db.getIndex(createResult.getId()); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getType(), is(createResult.getType())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void deleteIndex() { - try { - db.createCollection(COLLECTION_NAME, null); - final Collection fields = new ArrayList(); - fields.add("a"); - final IndexEntity createResult = db.collection(COLLECTION_NAME).ensureHashIndex(fields, null); - final String id = db.deleteIndex(createResult.getId()); - assertThat(id, is(createResult.getId())); - try { - db.getIndex(id); - fail(); - } catch (final ArangoDBException e) { - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void getCollections() { - try { - final Collection systemCollections = db.getCollections(null); - db.createCollection(COLLECTION_NAME + "1", null); - db.createCollection(COLLECTION_NAME + "2", null); - final Collection collections = db.getCollections(null); - assertThat(collections.size(), is(2 + systemCollections.size())); - assertThat(collections, is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME + "1").drop(); - db.collection(COLLECTION_NAME + "2").drop(); - } - } - - @Test - public void getCollectionsExcludeSystem() { - try { - final CollectionsReadOptions options = new 
CollectionsReadOptions().excludeSystem(true); - final Collection systemCollections = db.getCollections(options); - assertThat(systemCollections.size(), is(0)); - db.createCollection(COLLECTION_NAME + "1", null); - db.createCollection(COLLECTION_NAME + "2", null); - final Collection collections = db.getCollections(options); - assertThat(collections.size(), is(2)); - assertThat(collections, is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME + "1").drop(); - db.collection(COLLECTION_NAME + "2").drop(); - } - } - - @Test - public void grantAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessRW() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.RW); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessRO() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.RO); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void grantAccessNONE() { - try { - arangoDB.createUser("user1", "1234", null); - db.grantAccess("user1", Permissions.NONE); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void grantAccessUserNotFound() { - db.grantAccess("user1", Permissions.RW); - } - - @Test - public void revokeAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.revokeAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void revokeAccessUserNotFound() { - db.revokeAccess("user1"); - } - - @Test - public void resetAccess() { - try { - arangoDB.createUser("user1", "1234", null); - db.resetAccess("user1"); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test(expected = ArangoDBException.class) - public void resetAccessUserNotFound() { - 
db.resetAccess("user1"); - } - - @Test - public void grantDefaultCollectionAccess() { - try { - arangoDB.createUser("user1", "1234"); - db.grantDefaultCollectionAccess("user1", Permissions.RW); - } finally { - arangoDB.deleteUser("user1"); - } - } - - @Test - public void getPermissions() { - assertThat(Permissions.RW, is(db.getPermissions("root"))); - } - - @Test - public void query() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryForEach() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryStream() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, null, String.class); - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public 
void queryWithCount() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test Limit 6 return i._id", null, - new AqlQueryOptions().count(true), String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 6; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 6)); - } - assertThat(cursor.getCount(), is(6)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithLimitAndFullCount() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test Limit 5 return i._id", null, - new AqlQueryOptions().fullCount(true), String.class); - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 5; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 5)); - } - assertThat(cursor.getStats(), is(notNullValue())); - assertThat(cursor.getStats().getFullCount(), is(10L)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithBatchSize() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).count(true), String.class); - - assertThat(cursor, is(notNullValue())); - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - } - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryStreamWithBatchSize() { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - 
db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).count(true), String.class); - - assertThat(cursor, is(notNullValue())); - final AtomicInteger i = new AtomicInteger(0); - for (; cursor.hasNext(); cursor.next()) { - i.incrementAndGet(); - } - assertThat(i.get(), is(10)); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - /** - * ignored. takes to long - */ - @Test - @Ignore - public void queryWithTTL() throws InterruptedException { - // set TTL to 1 seconds and get the second batch after 2 seconds! - final int ttl = 1; - final int wait = 2; - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(5).ttl(ttl), String.class); - - assertThat(cursor, is(notNullValue())); - - for (int i = 0; i < 10; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 10)); - if (i == 1) { - Thread.sleep(wait * 1000); - } - } - fail("this should fail"); - } catch (final ArangoDBException ex) { - assertThat(ex.getMessage(), is("Response: 404, Error: 1600 - cursor not found")); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void changeQueryCache() { - try { - QueryCachePropertiesEntity properties = db.getQueryCacheProperties(); - assertThat(properties, is(notNullValue())); - assertThat(properties.getMode(), is(CacheMode.off)); - assertThat(properties.getMaxResults(), greaterThan(0L)); - - properties.setMode(CacheMode.on); - properties = db.setQueryCacheProperties(properties); - assertThat(properties, is(notNullValue())); - assertThat(properties.getMode(), is(CacheMode.on)); - - properties = db.getQueryCacheProperties(); - assertThat(properties.getMode(), 
is(CacheMode.on)); - } finally { - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.off); - db.setQueryCacheProperties(properties); - } - } - - @Test - public void queryWithCache() throws InterruptedException { - if (arangoDB.getRole() != ServerRole.SINGLE) { - return; - } - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.on); - db.setQueryCacheProperties(properties); - - final ArangoCursor cursor = db.query("FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", - null, new AqlQueryOptions().cache(true), String.class); - - assertThat(cursor, is(notNullValue())); - assertThat(cursor.isCached(), is(false)); - - final ArangoCursor cachedCursor = db.query( - "FOR t IN db_test FILTER t.age >= 10 SORT t.age RETURN t._id", null, new AqlQueryOptions().cache(true), - String.class); - - assertThat(cachedCursor, is(notNullValue())); - assertThat(cachedCursor.isCached(), is(true)); - - } finally { - db.collection(COLLECTION_NAME).drop(); - final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); - properties.setMode(CacheMode.off); - db.setQueryCacheProperties(properties); - } - } - - @Test - public void queryWithMemoryLimit() { - try { - db.query("RETURN 'bla'", null, new AqlQueryOptions().memoryLimit(1L), String.class); - fail(); - } catch (final ArangoDBException e) { - assertThat(e.getErrorNum(), is(32)); - } - } - - @Test(expected = ArangoDBException.class) - public void queryWithFailOnWarningTrue() { - db.query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(true), String.class); - } - - @Test - public void queryWithFailOnWarningFalse() { - final ArangoCursor cursor = db.query("RETURN 1 / 0", null, new AqlQueryOptions().failOnWarning(false), - 
String.class); - assertThat(cursor.next(), is("null")); - } - - @Test - public void queryWithMaxWarningCount() { - final ArangoCursor cursorWithWarnings = db.query("RETURN 1 / 0", null, new AqlQueryOptions(), - String.class); - assertThat(cursorWithWarnings.getWarnings().size(), is(1)); - final ArangoCursor cursorWithLimitedWarnings = db.query("RETURN 1 / 0", null, - new AqlQueryOptions().maxWarningCount(0L), String.class); - assertThat(cursorWithLimitedWarnings.getWarnings().size(), is(0)); - } - - @Test - public void queryCursor() { - try { - db.createCollection(COLLECTION_NAME, null); - final int numbDocs = 10; - for (int i = 0; i < numbDocs; i++) { - db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(), null); - } - - final int batchSize = 5; - final ArangoCursor cursor = db.query("for i in db_test return i._id", null, - new AqlQueryOptions().batchSize(batchSize).count(true), String.class); - assertThat(cursor, is(notNullValue())); - assertThat(cursor.getCount(), is(numbDocs)); - - final ArangoCursor cursor2 = db.cursor(cursor.getId(), String.class); - assertThat(cursor2, is(notNullValue())); - assertThat(cursor2.getCount(), is(numbDocs)); - assertThat(cursor2.hasNext(), is(true)); - - for (int i = 0; i < batchSize; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != batchSize)); - } - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void changeQueryTrackingProperties() { - try { - QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); - assertThat(properties, is(notNullValue())); - assertThat(properties.getEnabled(), is(true)); - assertThat(properties.getTrackSlowQueries(), is(true)); - assertThat(properties.getMaxQueryStringLength(), greaterThan(0L)); - assertThat(properties.getMaxSlowQueries(), greaterThan(0L)); - assertThat(properties.getSlowQueryThreshold(), greaterThan(0L)); - properties.setEnabled(false); - properties = db.setQueryTrackingProperties(properties); - 
assertThat(properties, is(notNullValue())); - assertThat(properties.getEnabled(), is(false)); - properties = db.getQueryTrackingProperties(); - assertThat(properties.getEnabled(), is(false)); - } finally { - final QueryTrackingPropertiesEntity properties = new QueryTrackingPropertiesEntity(); - properties.setEnabled(true); - db.setQueryTrackingProperties(properties); - } - } - - @Test - public void queryWithBindVars() throws InterruptedException { - try { - db.createCollection(COLLECTION_NAME, null); - for (int i = 0; i < 10; i++) { - final BaseDocument baseDocument = new BaseDocument(); - baseDocument.addAttribute("age", 20 + i); - db.collection(COLLECTION_NAME).insertDocument(baseDocument, null); - } - final Map bindVars = new HashMap(); - bindVars.put("@coll", COLLECTION_NAME); - bindVars.put("age", 25); - - final ArangoCursor cursor = db.query("FOR t IN @@coll FILTER t.age >= @age SORT t.age RETURN t._id", - bindVars, null, String.class); - - assertThat(cursor, is(notNullValue())); - - for (int i = 0; i < 5; i++, cursor.next()) { - assertThat(cursor.hasNext(), is(i != 5)); - } - - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void queryWithWarning() { - final ArangoCursor cursor = arangoDB.db().query("return _apps + 1", null, null, String.class); - - assertThat(cursor, is(notNullValue())); - assertThat(cursor.getWarnings(), is(notNullValue())); - } - - @Test - public void queryClose() throws IOException { - final ArangoCursor cursor = arangoDB.db().query("for i in 1..2 return i", null, - new AqlQueryOptions().batchSize(1), String.class); - cursor.close(); - int count = 0; - try { - for (; cursor.hasNext(); cursor.next(), count++) { - } - fail(); - } catch (final ArangoDBException e) { - assertThat(count, is(1)); - } - - } - - @Test - public void queryNoResults() throws IOException { - try { - db.createCollection(COLLECTION_NAME); - final ArangoCursor cursor = db.query("FOR i IN @@col RETURN i", - new MapBuilder().put("@col", 
COLLECTION_NAME).get(), null, BaseDocument.class); - cursor.close(); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void explainQuery() { - final AqlExecutionExplainEntity explain = arangoDB.db().explainQuery("for i in 1..1 return i", null, null); - assertThat(explain, is(notNullValue())); - assertThat(explain.getPlan(), is(notNullValue())); - assertThat(explain.getPlans(), is(nullValue())); - final ExecutionPlan plan = explain.getPlan(); - assertThat(plan.getCollections().size(), is(0)); - assertThat(plan.getEstimatedCost(), greaterThan(0)); - assertThat(plan.getEstimatedNrItems(), greaterThan(0)); - assertThat(plan.getVariables().size(), is(2)); - assertThat(plan.getNodes().size(), is(greaterThan(0))); - } - - @Test - public void parseQuery() { - final AqlParseEntity parse = arangoDB.db().parseQuery("for i in 1..1 return i"); - assertThat(parse, is(notNullValue())); - assertThat(parse.getBindVars(), is(empty())); - assertThat(parse.getCollections().size(), is(0)); - assertThat(parse.getAst().size(), is(1)); - final AstNode root = parse.getAst().iterator().next(); - assertThat(root.getType(), is("root")); - assertThat(root.getName(), is(nullValue())); - assertThat(root.getSubNodes(), is(notNullValue())); - assertThat(root.getSubNodes().size(), is(2)); - final Iterator iterator = root.getSubNodes().iterator(); - final AstNode for_ = iterator.next(); - assertThat(for_.getType(), is("for")); - assertThat(for_.getSubNodes(), is(notNullValue())); - assertThat(for_.getSubNodes().size(), is(2)); - final Iterator iterator2 = for_.getSubNodes().iterator(); - final AstNode first = iterator2.next(); - assertThat(first.getType(), is("variable")); - assertThat(first.getName(), is("i")); - final AstNode second = iterator2.next(); - assertThat(second.getType(), is("range")); - assertThat(second.getName(), is(nullValue())); - final AstNode return_ = iterator.next(); - assertThat(return_.getType(), is("return")); - 
assertThat(return_.getSubNodes(), is(notNullValue())); - assertThat(return_.getSubNodes().size(), is(1)); - assertThat(return_.getSubNodes().iterator().next().getType(), is("reference")); - assertThat(return_.getSubNodes().iterator().next().getName(), is("i")); - } - - @Test - @Ignore - public void getCurrentlyRunningQueries() throws InterruptedException, ExecutionException { - final Thread t = new Thread() { - @Override - public void run() { - super.run(); - db.query("return sleep(0.2)", null, null, Void.class); - } - }; - t.start(); - Thread.sleep(100); - try { - final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); - assertThat(currentlyRunningQueries, is(notNullValue())); - assertThat(currentlyRunningQueries.size(), is(1)); - final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); - assertThat(queryEntity.getQuery(), is("return sleep(0.2)")); - assertThat(queryEntity.getState(), is(QueryExecutionState.EXECUTING)); - } finally { - t.join(); - } - } - - @Test - @Ignore - public void getAndClearSlowQueries() throws InterruptedException, ExecutionException { - final QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); - final Long slowQueryThreshold = properties.getSlowQueryThreshold(); - try { - properties.setSlowQueryThreshold(1L); - db.setQueryTrackingProperties(properties); - - db.query("return sleep(1.1)", null, null, Void.class); - final Collection slowQueries = db.getSlowQueries(); - assertThat(slowQueries, is(notNullValue())); - assertThat(slowQueries.size(), is(1)); - final QueryEntity queryEntity = slowQueries.iterator().next(); - assertThat(queryEntity.getQuery(), is("return sleep(1.1)")); - - db.clearSlowQueries(); - assertThat(db.getSlowQueries().size(), is(0)); - } finally { - properties.setSlowQueryThreshold(slowQueryThreshold); - db.setQueryTrackingProperties(properties); - } - } - - @Test - @Ignore - public void killQuery() throws InterruptedException, ExecutionException { - final 
Thread t = new Thread() { - @Override - public void run() { - super.run(); - try { - db.query("return sleep(0.2)", null, null, Void.class); - fail(); - } catch (final ArangoDBException e) { - } - } - }; - t.start(); - Thread.sleep(100); - final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); - assertThat(currentlyRunningQueries, is(notNullValue())); - assertThat(currentlyRunningQueries.size(), is(1)); - - final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); - db.killQuery(queryEntity.getId()); - } - - @Test - public void createGetDeleteAqlFunction() { - final Collection aqlFunctionsInitial = db.getAqlFunctions(null); - assertThat(aqlFunctionsInitial, is(empty())); - try { - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit", - "function (celsius) { return celsius * 1.8 + 32; }", null); - - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), is(greaterThan(aqlFunctionsInitial.size()))); - } finally { - db.deleteAqlFunction("myfunctions::temperature::celsiustofahrenheit", null); - - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), is(aqlFunctionsInitial.size())); - } - } - - @Test - public void createGetDeleteAqlFunctionWithNamespace() { - final Collection aqlFunctionsInitial = db.getAqlFunctions(null); - assertThat(aqlFunctionsInitial, is(empty())); - try { - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit1", - "function (celsius) { return celsius * 1.8 + 32; }", null); - db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit2", - "function (celsius) { return celsius * 1.8 + 32; }", null); - - } finally { - db.deleteAqlFunction("myfunctions::temperature", new AqlFunctionDeleteOptions().group(true)); - - final Collection aqlFunctions = db.getAqlFunctions(null); - assertThat(aqlFunctions.size(), is(aqlFunctionsInitial.size())); - } - } - - @Test - public void createGraph() { - try { - final 
GraphEntity result = db.createGraph(GRAPH_NAME, null, null); - assertThat(result, is(notNullValue())); - assertThat(result.getName(), is(GRAPH_NAME)); - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void getGraphs() { - try { - db.createGraph(GRAPH_NAME, null, null); - final Collection graphs = db.getGraphs(); - assertThat(graphs, is(notNullValue())); - assertThat(graphs.size(), is(1)); - assertThat(graphs.iterator().next().getName(), is(GRAPH_NAME)); - } finally { - db.graph(GRAPH_NAME).drop(); - } - } - - @Test - public void transactionString() { - final TransactionOptions options = new TransactionOptions().params("test"); - final String result = db.transaction("function (params) {return params;}", String.class, options); - assertThat(result, is("test")); - } - - @Test - public void transactionNumber() { - final TransactionOptions options = new TransactionOptions().params(5); - final Integer result = db.transaction("function (params) {return params;}", Integer.class, options); - assertThat(result, is(5)); - } - - @Test - public void transactionVPack() throws VPackException { - final TransactionOptions options = new TransactionOptions().params(new VPackBuilder().add("test").slice()); - final VPackSlice result = db.transaction("function (params) {return params;}", VPackSlice.class, options); - assertThat(result.isString(), is(true)); - assertThat(result.getAsString(), is("test")); - } - - @Test - public void transactionVPackObject() throws VPackException { - final VPackSlice params = new VPackBuilder().add(ValueType.OBJECT).add("foo", "hello").add("bar", "world") - .close().slice(); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", - String.class, options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionVPackArray() throws VPackException { - final VPackSlice params = new 
VPackBuilder().add(ValueType.ARRAY).add("hello").add("world").close().slice(); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionMap() { - final Map params = new MapBuilder().put("foo", "hello").put("bar", "world").get(); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", - String.class, options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionArray() { - final String[] params = new String[] { "hello", "world" }; - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionCollection() { - final Collection params = new ArrayList(); - params.add("hello"); - params.add("world"); - final TransactionOptions options = new TransactionOptions().params(params); - final String result = db.transaction("function (params) { return params[0] + ' ' + params[1];}", String.class, - options); - assertThat(result, is("hello world")); - } - - @Test - public void transactionInsertJson() { - try { - db.createCollection(COLLECTION_NAME); - final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"0\"}") - .writeCollections(COLLECTION_NAME); - //@formatter:off - db.transaction("function (params) { " - + "var db = require('internal').db;" - + "db." 
+ COLLECTION_NAME + ".save(JSON.parse(params));" - + "}", Void.class, options); - //@formatter:on - assertThat(db.collection(COLLECTION_NAME).count().getCount(), is(1L)); - assertThat(db.collection(COLLECTION_NAME).getDocument("0", String.class), is(notNullValue())); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void transactionEmpty() { - db.transaction("function () {}", null, null); - } - - @Test - public void transactionallowImplicit() { - try { - db.createCollection("someCollection", null); - db.createCollection("someOtherCollection", null); - final String action = "function (params) {" + "var db = require('internal').db;" - + "return {'a':db.someCollection.all().toArray()[0], 'b':db.someOtherCollection.all().toArray()[0]};" - + "}"; - final TransactionOptions options = new TransactionOptions().readCollections("someCollection"); - db.transaction(action, VPackSlice.class, options); - try { - options.allowImplicit(false); - db.transaction(action, VPackSlice.class, options); - fail(); - } catch (final ArangoDBException e) { - } - } finally { - db.collection("someCollection").drop(); - db.collection("someOtherCollection").drop(); - } - } - - protected static class TransactionTestEntity { - private String value; - - public TransactionTestEntity() { - super(); - } - } - - @Test - public void transactionPojoReturn() { - final String action = "function() { return {'value':'hello world'}; }"; - final TransactionTestEntity res = db.transaction(action, TransactionTestEntity.class, new TransactionOptions()); - assertThat(res, is(notNullValue())); - assertThat(res.value, is("hello world")); - } - - @Test - public void getInfo() { - final DatabaseEntity info = db.getInfo(); - assertThat(info, is(notNullValue())); - assertThat(info.getId(), is(notNullValue())); - assertThat(info.getName(), is(TEST_DB)); - assertThat(info.getPath(), is(notNullValue())); - assertThat(info.getIsSystem(), is(false)); - } - - @Test - public void 
executeTraversal() { - try { - db.createCollection("person", null); - db.createCollection("knows", new CollectionCreateOptions().type(CollectionType.EDGES)); - for (final String e : new String[] { "Alice", "Bob", "Charlie", "Dave", "Eve" }) { - final BaseDocument doc = new BaseDocument(); - doc.setKey(e); - db.collection("person").insertDocument(doc, null); - } - for (final String[] e : new String[][] { new String[] { "Alice", "Bob" }, new String[] { "Bob", "Charlie" }, - new String[] { "Bob", "Dave" }, new String[] { "Eve", "Alice" }, new String[] { "Eve", "Bob" } }) { - final BaseEdgeDocument edge = new BaseEdgeDocument(); - edge.setKey(e[0] + "_knows_" + e[1]); - edge.setFrom("person/" + e[0]); - edge.setTo("person/" + e[1]); - db.collection("knows").insertDocument(edge, null); - } - final TraversalOptions options = new TraversalOptions().edgeCollection("knows").startVertex("person/Alice") - .direction(Direction.outbound); - final TraversalEntity traversal = db.executeTraversal(BaseDocument.class, - BaseEdgeDocument.class, options); - - assertThat(traversal, is(notNullValue())); - - final Collection vertices = traversal.getVertices(); - assertThat(vertices, is(notNullValue())); - assertThat(vertices.size(), is(4)); - - final Iterator verticesIterator = vertices.iterator(); - final Collection v = Arrays.asList(new String[] { "Alice", "Bob", "Charlie", "Dave" }); - for (; verticesIterator.hasNext();) { - assertThat(v.contains(verticesIterator.next().getKey()), is(true)); - } - - final Collection> paths = traversal.getPaths(); - assertThat(paths, is(notNullValue())); - assertThat(paths.size(), is(4)); - - assertThat(paths.iterator().hasNext(), is(true)); - final PathEntity first = paths.iterator().next(); - assertThat(first, is(notNullValue())); - assertThat(first.getEdges().size(), is(0)); - assertThat(first.getVertices().size(), is(1)); - assertThat(first.getVertices().iterator().next().getKey(), is("Alice")); - } finally { - db.collection("person").drop(); - 
db.collection("knows").drop(); - } - } - - @Test - public void getDocument() { - try { - db.createCollection(COLLECTION_NAME); - final BaseDocument value = new BaseDocument(); - value.setKey("123"); - db.collection(COLLECTION_NAME).insertDocument(value); - final BaseDocument document = db.getDocument(COLLECTION_NAME + "/123", BaseDocument.class); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is("123")); - } finally { - db.collection(COLLECTION_NAME).drop(); - } - } - - @Test - public void shouldIncludeExceptionMessage() { - final String version = db.getVersion().getVersion(); - if (version.startsWith("3.1") || version.startsWith("3.0")) { - final String exceptionMessage = "My error context"; - final String action = "function (params) {" + "throw '" + exceptionMessage + "';" + "}"; - try { - db.transaction(action, VPackSlice.class, null); - fail(); - } catch (final ArangoDBException e) { - assertTrue(e.getException().contains(exceptionMessage)); - } - } - } - - @Test(expected = ArangoDBException.class) - public void getDocumentWrongId() { - db.getDocument("123", BaseDocument.class); - } - - @Test - public void reloadRouting() { - db.reloadRouting(); - } -} diff --git a/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java b/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java deleted file mode 100644 index 09c7fa6c7..000000000 --- a/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Collection; - -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.EdgeEntity; -import com.arangodb.entity.EdgeUpdateEntity; -import com.arangodb.entity.VertexEntity; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.EdgeDeleteOptions; -import com.arangodb.model.EdgeReplaceOptions; -import com.arangodb.model.EdgeUpdateOptions; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoEdgeCollectionTest extends BaseTest { - - private static final String GRAPH_NAME = "db_collection_test"; - private static final String EDGE_COLLECTION_NAME = "db_edge_collection_test"; - private static final String VERTEX_COLLECTION_NAME = "db_vertex_collection_test"; - - public 
ArangoEdgeCollectionTest(final Builder builder) { - super(builder); - setup(); - } - - public void setup() { - try { - db.createCollection(VERTEX_COLLECTION_NAME, null); - } catch (final ArangoDBException e) { - } - try { - db.createCollection(EDGE_COLLECTION_NAME, new CollectionCreateOptions().type(CollectionType.EDGES)); - } catch (final ArangoDBException e) { - } - final Collection edgeDefinitions = new ArrayList(); - edgeDefinitions.add(new EdgeDefinition().collection(EDGE_COLLECTION_NAME).from(VERTEX_COLLECTION_NAME) - .to(VERTEX_COLLECTION_NAME)); - db.createGraph(GRAPH_NAME, edgeDefinitions, null); - } - - @After - public void teardown() { - for (final String collection : new String[] { VERTEX_COLLECTION_NAME, EDGE_COLLECTION_NAME }) { - db.collection(collection).truncate(); - } - } - - private BaseEdgeDocument createEdgeValue() { - final VertexEntity v1 = db.graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final VertexEntity v2 = db.graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - - final BaseEdgeDocument value = new BaseEdgeDocument(); - value.setFrom(v1.getId()); - value.setTo(v2.getId()); - return value; - } - - @Test - public void insertEdge() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - assertThat(edge, is(notNullValue())); - final BaseEdgeDocument document = db.collection(EDGE_COLLECTION_NAME).getDocument(edge.getKey(), - BaseEdgeDocument.class, null); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(edge.getKey())); - assertThat(document.getFrom(), is(notNullValue())); - assertThat(document.getTo(), is(notNullValue())); - } - - @Test - public void insertEdgeUpdateRev() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = 
db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - assertThat(value.getRevision(), is(edge.getRev())); - } - - @Test - public void getEdge() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - final BaseEdgeDocument document = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(edge.getKey(), BaseEdgeDocument.class, null); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(edge.getKey())); - assertThat(document.getFrom(), is(notNullValue())); - assertThat(document.getTo(), is(notNullValue())); - } - - @Test - public void getEdgeIfMatch() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch(edge.getRev()); - final BaseDocument document = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).getEdge(edge.getKey(), - BaseDocument.class, options); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(edge.getKey())); - } - - @Test - public void getEdgeIfMatchFail() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch("no"); - final BaseEdgeDocument edge2 = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).getEdge(edge.getKey(), - BaseEdgeDocument.class, options); - assertThat(edge2, is(nullValue())); - } - - @Test - public void getEdgeIfNoneMatch() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - final DocumentReadOptions options = new 
DocumentReadOptions().ifNoneMatch("no"); - final BaseDocument document = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).getEdge(edge.getKey(), - BaseDocument.class, options); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(edge.getKey())); - } - - @Test - public void getEdgeIfNoneMatchFail() { - final BaseEdgeDocument value = createEdgeValue(); - final EdgeEntity edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(value, null); - final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch(edge.getRev()); - final BaseEdgeDocument edge2 = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).getEdge(edge.getKey(), - BaseEdgeDocument.class, options); - assertThat(edge2, is(nullValue())); - } - - @Test - public void replaceEdge() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final EdgeUpdateEntity replaceResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .replaceEdge(createResult.getKey(), doc, null); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test 
- public void replaceEdgeUpdateRev() { - final BaseEdgeDocument doc = createEdgeValue(); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final EdgeUpdateEntity replaceResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .replaceEdge(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(replaceResult.getRev())); - } - - @Test - public void replaceEdgeIfMatch() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch(createResult.getRev()); - final EdgeUpdateEntity replaceResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .replaceEdge(createResult.getKey(), doc, options); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test - public void replaceEdgeIfMatchFail() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - final EdgeEntity createResult = 
db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - try { - final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch("no"); - db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).replaceEdge(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void updateEdge() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final EdgeUpdateEntity updateResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .updateEdge(createResult.getKey(), doc, null); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateEdgeUpdateRev() { - final BaseEdgeDocument doc = createEdgeValue(); - final EdgeEntity createResult = 
db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final EdgeUpdateEntity updateResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .updateEdge(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(updateResult.getRev())); - } - - @Test - public void updateEdgeIfMatch() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch(createResult.getRev()); - final EdgeUpdateEntity updateResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .updateEdge(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateEdgeIfMatchFail() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - 
doc.addAttribute("c", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - try { - final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch("no"); - db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).updateEdge(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void updateEdgeKeepNullTrue() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.updateAttribute("a", null); - final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(true); - final EdgeUpdateEntity updateResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .updateEdge(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getProperties().keySet().size(), is(1)); - assertThat(readResult.getProperties().keySet(), hasItem("a")); - } - - @Test - public void updateEdgeKeepNullFalse() { - final BaseEdgeDocument doc = createEdgeValue(); - doc.addAttribute("a", "test"); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - doc.updateAttribute("a", null); - final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(false); - final 
EdgeUpdateEntity updateResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .updateEdge(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseEdgeDocument readResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getRevision(), is(notNullValue())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - } - - @Test - public void deleteEdge() { - final BaseEdgeDocument doc = createEdgeValue(); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).deleteEdge(createResult.getKey(), null); - final BaseEdgeDocument edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(edge, is(nullValue())); - } - - @Test - public void deleteEdgeIfMatch() { - final BaseEdgeDocument doc = createEdgeValue(); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch(createResult.getRev()); - db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).deleteEdge(createResult.getKey(), options); - final BaseEdgeDocument edge = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME) - .getEdge(createResult.getKey(), BaseEdgeDocument.class, null); - assertThat(edge, is(nullValue())); - } - - @Test - public void deleteEdgeIfMatchFail() { - final BaseEdgeDocument doc = 
createEdgeValue(); - final EdgeEntity createResult = db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(doc, null); - final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch("no"); - try { - db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).deleteEdge(createResult.getKey(), options); - fail(); - } catch (final ArangoDBException e) { - } - } -} diff --git a/src/test/java/com/arangodb/ArangoExecuteableTest.java b/src/test/java/com/arangodb/ArangoExecuteableTest.java deleted file mode 100644 index f4bc5ed38..000000000 --- a/src/test/java/com/arangodb/ArangoExecuteableTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import org.junit.Test; - -import com.arangodb.internal.ArangoExecutor; - -/** - * @author Mark Vollmary - * - */ -public class ArangoExecuteableTest { - - @Test - public void validateDocumentKeyValid() { - checkDocumentKey("1test"); - checkDocumentKey("test1"); - checkDocumentKey("test-1"); - checkDocumentKey("test_1"); - checkDocumentKey("_test"); - } - - @Test(expected = ArangoDBException.class) - public void validateDocumentKeyInvalidSlash() { - checkDocumentKey("test/test"); - } - - @Test(expected = ArangoDBException.class) - public void validateDocumentKeyEmpty() { - checkDocumentKey(""); - } - - private void checkDocumentKey(final String key) throws ArangoDBException { - final ArangoExecutor executeBase = new ArangoExecutor(null, null) { - }; - executeBase.validateDocumentKey(key); - } - - @Test - public void validateDocumentIdValid() { - checkDocumentId("1test/1test"); - checkDocumentId("test1/test1"); - checkDocumentId("test-1/test-1"); - checkDocumentId("test_1/test_1"); - checkDocumentId("_test/_test"); - } - - @Test(expected = ArangoDBException.class) - public void validateDocumentIdInvalidWithoutSlash() { - checkDocumentId("test"); - } - - @Test(expected = ArangoDBException.class) - public void validateDocumentIdEmpty() { - checkDocumentId(""); - } - - private void checkDocumentId(final String id) throws ArangoDBException { - final ArangoExecutor executeBase = new ArangoExecutor(null, null) { - }; - executeBase.validateDocumentId(id); - } -} diff --git a/src/test/java/com/arangodb/ArangoGraphTest.java b/src/test/java/com/arangodb/ArangoGraphTest.java deleted file mode 100644 index 01ec20e58..000000000 --- a/src/test/java/com/arangodb/ArangoGraphTest.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file 
except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; - -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.ArangoDBVersion.License; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.GraphEntity; -import com.arangodb.model.CollectionCreateOptions; -import com.arangodb.model.GraphCreateOptions; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoGraphTest extends BaseTest { - - private static final String GRAPH_NAME = "db_collection_test"; - private static final String EDGE_COL_1 = "db_edge1_collection_test"; - private static final String EDGE_COL_2 = "db_edge2_collection_test"; - private static final String EDGE_COL_3 = "db_edge3_collection_test"; - private static final String VERTEX_COL_1 = "db_vertex1_collection_test"; - private static final String VERTEX_COL_2 = "db_vertex2_collection_test"; - private static final String VERTEX_COL_3 = "db_vertex3_collection_test"; - 
private static final String VERTEX_COL_4 = "db_vertex4_collection_test"; - - public ArangoGraphTest(final Builder builder) { - super(builder); - setup(); - } - - public void setup() { - try { - db.graph(GRAPH_NAME).drop(); - } catch (final ArangoDBException e1) { - } - for (final String collection : new String[] { VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_2, VERTEX_COL_3, - VERTEX_COL_4 }) { - try { - db.createCollection(collection, null); - } catch (final ArangoDBException e) { - } - } - for (final String collection : new String[] { EDGE_COL_1, EDGE_COL_2 }) { - try { - final CollectionCreateOptions options = new CollectionCreateOptions().type(CollectionType.EDGES); - db.createCollection(collection, options); - } catch (final ArangoDBException e) { - } - } - final Collection edgeDefinitions = new ArrayList(); - edgeDefinitions.add(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_1).to(VERTEX_COL_2)); - edgeDefinitions - .add(new EdgeDefinition().collection(EDGE_COL_2).from(VERTEX_COL_2).to(VERTEX_COL_1, VERTEX_COL_3)); - db.createGraph(GRAPH_NAME, edgeDefinitions, null); - } - - @After - public void teardown() { - for (final String collection : new String[] { EDGE_COL_1, EDGE_COL_2, VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, - VERTEX_COL_4 }) { - db.collection(collection).truncate(); - } - } - - @Test - public void exists() { - assertThat(db.graph(GRAPH_NAME).exists(), is(true)); - assertThat(db.graph(GRAPH_NAME + "no").exists(), is(false)); - } - - @Test - public void getGraphs() { - final Collection graphs = db.getGraphs(); - assertThat(graphs, is(notNullValue())); - assertThat(graphs.size(), is(1)); - assertThat(graphs.iterator().next().getName(), is(GRAPH_NAME)); - } - - @Test - public void getInfo() { - final GraphEntity info = db.graph(GRAPH_NAME).getInfo(); - assertThat(info, is(notNullValue())); - assertThat(info.getName(), is(GRAPH_NAME)); - assertThat(info.getEdgeDefinitions().size(), is(2)); - final Iterator iterator = 
info.getEdgeDefinitions().iterator(); - final EdgeDefinition e1 = iterator.next(); - assertThat(e1.getCollection(), is(EDGE_COL_1)); - assertThat(e1.getFrom(), hasItem(VERTEX_COL_1)); - assertThat(e1.getTo(), hasItem(VERTEX_COL_2)); - final EdgeDefinition e2 = iterator.next(); - assertThat(e2.getCollection(), is(EDGE_COL_2)); - assertThat(e2.getFrom(), hasItem(VERTEX_COL_2)); - assertThat(e2.getTo(), hasItems(VERTEX_COL_1, VERTEX_COL_3)); - assertThat(info.getOrphanCollections(), is(empty())); - } - - @Test - public void getVertexCollections() { - final Collection vertexCollections = db.graph(GRAPH_NAME).getVertexCollections(); - assertThat(vertexCollections, is(notNullValue())); - assertThat(vertexCollections.size(), is(3)); - assertThat(vertexCollections, hasItems(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3)); - } - - @Test - public void addVertexCollection() { - final GraphEntity graph = db.graph(GRAPH_NAME).addVertexCollection(VERTEX_COL_4); - assertThat(graph, is(notNullValue())); - final Collection vertexCollections = db.graph(GRAPH_NAME).getVertexCollections(); - assertThat(vertexCollections, hasItems(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, VERTEX_COL_4)); - setup(); - } - - @Test - public void getEdgeCollections() { - final Collection edgeCollections = db.graph(GRAPH_NAME).getEdgeDefinitions(); - assertThat(edgeCollections, is(notNullValue())); - assertThat(edgeCollections.size(), is(2)); - assertThat(edgeCollections, hasItems(EDGE_COL_1, EDGE_COL_2)); - } - - @Test - public void addEdgeDefinition() { - final GraphEntity graph = db.graph(GRAPH_NAME) - .addEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_3).from(VERTEX_COL_1).to(VERTEX_COL_2)); - assertThat(graph, is(notNullValue())); - final Collection edgeDefinitions = graph.getEdgeDefinitions(); - assertThat(edgeDefinitions.size(), is(3)); - int count = 0; - for (final EdgeDefinition e : edgeDefinitions) { - if (e.getCollection().equals(EDGE_COL_3)) { - count++; - } - } - assertThat(count, is(1)); - 
for (final EdgeDefinition e : edgeDefinitions) { - if (e.getCollection().equals(EDGE_COL_3)) { - assertThat(e.getFrom(), hasItem(VERTEX_COL_1)); - assertThat(e.getTo(), hasItem(VERTEX_COL_2)); - } - } - setup(); - } - - @Test - public void replaceEdgeDefinition() { - final GraphEntity graph = db.graph(GRAPH_NAME) - .replaceEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_3).to(VERTEX_COL_4)); - final Collection edgeDefinitions = graph.getEdgeDefinitions(); - assertThat(edgeDefinitions.size(), is(2)); - int count = 0; - for (final EdgeDefinition e : edgeDefinitions) { - if (e.getCollection().equals(EDGE_COL_1)) { - count++; - } - } - assertThat(count, is(1)); - for (final EdgeDefinition e : edgeDefinitions) { - if (e.getCollection().equals(EDGE_COL_1)) { - assertThat(e.getFrom(), hasItem(VERTEX_COL_3)); - assertThat(e.getTo(), hasItem(VERTEX_COL_4)); - } - } - setup(); - } - - @Test - public void removeEdgeDefinition() { - final GraphEntity graph = db.graph(GRAPH_NAME).removeEdgeDefinition(EDGE_COL_1); - final Collection edgeDefinitions = graph.getEdgeDefinitions(); - assertThat(edgeDefinitions.size(), is(1)); - assertThat(edgeDefinitions.iterator().next().getCollection(), is(EDGE_COL_2)); - setup(); - } - - @Test - public void smartGraph() { - if (arangoDB.getVersion().getLicense() == License.ENTERPRISE) { - for (final String collection : new String[] { EDGE_COL_1, EDGE_COL_2, VERTEX_COL_1, VERTEX_COL_2, - VERTEX_COL_3, VERTEX_COL_4 }) { - try { - db.collection(collection).drop(); - } catch (final ArangoDBException e) { - } - } - try { - db.graph(GRAPH_NAME).drop(); - } catch (final ArangoDBException e) { - } - final Collection edgeDefinitions = new ArrayList(); - edgeDefinitions.add(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_1).to(VERTEX_COL_2)); - edgeDefinitions - .add(new EdgeDefinition().collection(EDGE_COL_2).from(VERTEX_COL_2).to(VERTEX_COL_1, VERTEX_COL_3)); - final GraphEntity graph = db.createGraph(GRAPH_NAME, 
edgeDefinitions, - new GraphCreateOptions().isSmart(true).smartGraphAttribute("test").numberOfShards(2)); - assertThat(graph, is(notNullValue())); - assertThat(graph.getIsSmart(), is(true)); - assertThat(graph.getSmartGraphAttribute(), is("test")); - assertThat(graph.getNumberOfShards(), is(2)); - } - } -} diff --git a/src/test/java/com/arangodb/ArangoSslTest.java b/src/test/java/com/arangodb/ArangoSslTest.java deleted file mode 100644 index 795f7592d..000000000 --- a/src/test/java/com/arangodb/ArangoSslTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.security.KeyStore; - -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLHandshakeException; -import javax.net.ssl.TrustManagerFactory; - -import org.junit.Ignore; -import org.junit.Test; - -import com.arangodb.entity.ArangoDBVersion; - -/** - * @author Mark Vollmary - * - */ -public class ArangoSslTest { - - /*- - * a SSL trust store - * - * create the trust store for the self signed certificate: - * keytool -import -alias "my arangodb server cert" -file UnitTests/server.pem -keystore example.truststore - * - * Documentation: - * https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/conn/ssl/SSLSocketFactory.html - */ - private static final String SSL_TRUSTSTORE = "/example.truststore"; - private static final String SSL_TRUSTSTORE_PASSWORD = "12345678"; - - @Test - @Ignore - public void connect() throws Exception { - final KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(this.getClass().getResourceAsStream(SSL_TRUSTSTORE), SSL_TRUSTSTORE_PASSWORD.toCharArray()); - - final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, SSL_TRUSTSTORE_PASSWORD.toCharArray()); - - final TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ks); - - final SSLContext sc = SSLContext.getInstance("TLS"); - sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - - final ArangoDB arangoDB = new ArangoDB.Builder() - .loadProperties(ArangoSslTest.class.getResourceAsStream("/arangodb-ssl.properties")).useSsl(true) - .sslContext(sc).build(); - final ArangoDBVersion version = 
arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - } - - @Test - @Ignore - public void connectWithoutValidSslContext() throws Exception { - try { - final ArangoDB arangoDB = new ArangoDB.Builder() - .loadProperties(ArangoSslTest.class.getResourceAsStream("/arangodb-ssl.properties")).useSsl(true) - .build(); - arangoDB.getVersion(); - fail("this should fail"); - } catch (final ArangoDBException ex) { - assertThat(ex.getCause() instanceof SSLHandshakeException, is(true)); - } - } - -} diff --git a/src/test/java/com/arangodb/ArangoVertexCollectionTest.java b/src/test/java/com/arangodb/ArangoVertexCollectionTest.java deleted file mode 100644 index 11a4755a6..000000000 --- a/src/test/java/com/arangodb/ArangoVertexCollectionTest.java +++ /dev/null @@ -1,396 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.Collection; - -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.VertexEntity; -import com.arangodb.entity.VertexUpdateEntity; -import com.arangodb.model.DocumentReadOptions; -import com.arangodb.model.GraphCreateOptions; -import com.arangodb.model.VertexDeleteOptions; -import com.arangodb.model.VertexReplaceOptions; -import com.arangodb.model.VertexUpdateOptions; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class ArangoVertexCollectionTest extends BaseTest { - - private static final String GRAPH_NAME = "db_collection_test"; - private static final String COLLECTION_NAME = "db_vertex_collection_test"; - - public ArangoVertexCollectionTest(final Builder builder) { - super(builder); - setup(); - } - - public void setup() { - try { - db.createCollection(COLLECTION_NAME, null); - } catch (final ArangoDBException e) { - } - final GraphCreateOptions options = new GraphCreateOptions().orphanCollections(COLLECTION_NAME); - db.createGraph(GRAPH_NAME, null, options); - } - - @After - public void teardown() { - db.collection(COLLECTION_NAME).truncate(); - } - - @Test - public void dropVertexCollection() { - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).drop(); - final Collection vertexCollections = db.graph(GRAPH_NAME).getVertexCollections(); - assertThat(vertexCollections, not(hasItem(COLLECTION_NAME))); - } - - @Test - public void 
insertVertex() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - assertThat(vertex, is(notNullValue())); - final BaseDocument document = db.collection(COLLECTION_NAME).getDocument(vertex.getKey(), BaseDocument.class, - null); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(vertex.getKey())); - } - - @Test - public void insertVertexUpdateRev() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, null); - assertThat(doc.getRevision(), is(vertex.getRev())); - } - - @Test - public void getVertex() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final BaseDocument document = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).getVertex(vertex.getKey(), - BaseDocument.class, null); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(vertex.getKey())); - } - - @Test - public void getVertexIfMatch() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch(vertex.getRev()); - final BaseDocument document = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).getVertex(vertex.getKey(), - BaseDocument.class, options); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(vertex.getKey())); - } - - @Test - public void getVertexIfMatchFail() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final DocumentReadOptions options = new DocumentReadOptions().ifMatch("no"); - final BaseDocument vertex2 = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).getVertex(vertex.getKey(), - BaseDocument.class, options); - 
assertThat(vertex2, is(nullValue())); - } - - @Test - public void getVertexIfNoneMatch() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch("no"); - final BaseDocument document = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).getVertex(vertex.getKey(), - BaseDocument.class, options); - assertThat(document, is(notNullValue())); - assertThat(document.getKey(), is(vertex.getKey())); - } - - @Test - public void getVertexIfNoneMatchFail() { - final VertexEntity vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .insertVertex(new BaseDocument(), null); - final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch(vertex.getRev()); - final BaseDocument vertex2 = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).getVertex(vertex.getKey(), - BaseDocument.class, options); - assertThat(vertex2, is(nullValue())); - } - - @Test - public void replaceVertex() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final VertexUpdateEntity replaceResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .replaceVertex(createResult.getKey(), doc, null); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), 
is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test - public void replaceVertexUpdateRev() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final VertexUpdateEntity replaceResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .replaceVertex(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(replaceResult.getRev())); - } - - @Test - public void replaceVertexIfMatch() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch(createResult.getRev()); - final VertexUpdateEntity replaceResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .replaceVertex(createResult.getKey(), doc, options); - assertThat(replaceResult, is(notNullValue())); - assertThat(replaceResult.getId(), is(createResult.getId())); - assertThat(replaceResult.getRev(), is(not(replaceResult.getOldRev()))); - assertThat(replaceResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getRevision(), is(replaceResult.getRev())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - 
assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - } - - @Test - public void replaceVertexIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.getProperties().clear(); - doc.addAttribute("b", "test"); - try { - final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch("no"); - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).replaceVertex(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void updateVertex() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final VertexUpdateEntity updateResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .updateVertex(createResult.getKey(), doc, null); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - 
assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateVertexUpdateRev() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - assertThat(doc.getRevision(), is(createResult.getRev())); - final VertexUpdateEntity updateResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .updateVertex(createResult.getKey(), doc, null); - assertThat(doc.getRevision(), is(updateResult.getRev())); - } - - @Test - public void updateVertexIfMatch() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - final VertexUpdateOptions options = new VertexUpdateOptions().ifMatch(createResult.getRev()); - final VertexUpdateEntity updateResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .updateVertex(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getAttribute("a"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("a")), is("test1")); - assertThat(readResult.getAttribute("b"), is(notNullValue())); - assertThat(String.valueOf(readResult.getAttribute("b")), is("test")); - assertThat(readResult.getRevision(), is(updateResult.getRev())); - 
assertThat(readResult.getProperties().keySet(), hasItem("c")); - } - - @Test - public void updateVertexIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - doc.addAttribute("c", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.updateAttribute("a", "test1"); - doc.addAttribute("b", "test"); - doc.updateAttribute("c", null); - try { - final VertexUpdateOptions options = new VertexUpdateOptions().ifMatch("no"); - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).updateVertex(createResult.getKey(), doc, options); - fail(); - } catch (final ArangoDBException e) { - } - } - - @Test - public void updateVertexKeepNullTrue() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.updateAttribute("a", null); - final VertexUpdateOptions options = new VertexUpdateOptions().keepNull(true); - final VertexUpdateEntity updateResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .updateVertex(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getProperties().keySet().size(), is(1)); - assertThat(readResult.getProperties().keySet(), hasItem("a")); - } - - @Test - public void updateVertexKeepNullFalse() { - final BaseDocument doc = new BaseDocument(); - doc.addAttribute("a", "test"); - final VertexEntity createResult = 
db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - doc.updateAttribute("a", null); - final VertexUpdateOptions options = new VertexUpdateOptions().keepNull(false); - final VertexUpdateEntity updateResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .updateVertex(createResult.getKey(), doc, options); - assertThat(updateResult, is(notNullValue())); - assertThat(updateResult.getId(), is(createResult.getId())); - assertThat(updateResult.getRev(), is(not(updateResult.getOldRev()))); - assertThat(updateResult.getOldRev(), is(createResult.getRev())); - - final BaseDocument readResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(readResult.getKey(), is(createResult.getKey())); - assertThat(readResult.getId(), is(createResult.getId())); - assertThat(readResult.getRevision(), is(notNullValue())); - assertThat(readResult.getProperties().keySet(), not(hasItem("a"))); - } - - @Test - public void deleteVertex() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).deleteVertex(createResult.getKey(), null); - final BaseDocument vertex = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(vertex, is(nullValue())); - } - - @Test - public void deleteVertexIfMatch() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch(createResult.getRev()); - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).deleteVertex(createResult.getKey(), options); - final BaseDocument vertex = 
db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME) - .getVertex(createResult.getKey(), BaseDocument.class, null); - assertThat(vertex, is(nullValue())); - } - - @Test - public void deleteVertexIfMatchFail() { - final BaseDocument doc = new BaseDocument(); - final VertexEntity createResult = db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).insertVertex(doc, - null); - final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch("no"); - try { - db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME).deleteVertex(createResult.getKey(), options); - fail(); - } catch (final ArangoDBException e) { - } - } -} diff --git a/src/test/java/com/arangodb/BaseTest.java b/src/test/java/com/arangodb/BaseTest.java deleted file mode 100644 index ad6734357..000000000 --- a/src/test/java/com/arangodb/BaseTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import java.util.Arrays; -import java.util.Collection; - -import org.junit.AfterClass; -import org.junit.runners.Parameterized.Parameters; - -/** - * @author Mark Vollmary - * - */ -public abstract class BaseTest { - - @Parameters - public static Collection builders() { - return Arrays.asList(new ArangoDB.Builder().useProtocol(Protocol.VST), - new ArangoDB.Builder().useProtocol(Protocol.HTTP_JSON), - new ArangoDB.Builder().useProtocol(Protocol.HTTP_VPACK)); - } - - protected static final String TEST_DB = "java_driver_test_db"; - protected static ArangoDB arangoDB; - protected static ArangoDatabase db; - - public BaseTest(final ArangoDB.Builder builder) { - super(); - if (arangoDB != null) { - shutdown(); - } - arangoDB = builder.build(); - try { - arangoDB.db(TEST_DB).drop(); - } catch (final ArangoDBException e) { - } - arangoDB.createDatabase(TEST_DB); - db = arangoDB.db(TEST_DB); - } - - @AfterClass - public static void shutdown() { - arangoDB.db(TEST_DB).drop(); - arangoDB.shutdown(); - arangoDB = null; - } - -} diff --git a/src/test/java/com/arangodb/DocumentTest.java b/src/test/java/com/arangodb/DocumentTest.java deleted file mode 100644 index 60c1a1216..000000000 --- a/src/test/java/com/arangodb/DocumentTest.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.util.Map; - -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import com.arangodb.ArangoDB.Builder; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.DocumentCreateEntity; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -public class DocumentTest extends BaseTest { - - private static final String COLLECTION_NAME = "collection_test"; - private ArangoCollection collection; - - public DocumentTest(final Builder builder) { - super(builder); - setup(); - } - - public void setup() { - db.createCollection(COLLECTION_NAME); - collection = db.collection(COLLECTION_NAME); - } - - @After - public void teardown() { - collection.truncate(); - } - - @SuppressWarnings("unchecked") - @Test - public void insertAsJson() { - //@formatter:off - final String json = - "{" - + "\"article\": {" - + "\"artist\": \"PREGARDIEN/RHEINISCHE KANTOREI/DAS\"," - + "\"releaseDate\": \"1970-01-01\"," - + "\"composer\": \"BACH\"," - + "\"format\": \"CD\"," - + "\"vat\": \"H\"," - + "\"carriers\": 1," - + "\"label\": \"CAPRICCIO\"," - + "\"title\": \"BACH ST MATTHEW PASSION BWV244\"," - + "\"barcode\": [" - + "\"4006408600466\"" - + "]," - + "\"conductor\": \"MAX, H.\"" - + "}," - + "\"stock\": {" - + "\"status\": \"RMV\"," - + "\"lastUpdate\": \"2016-11-01 00:00\"" - + "}" - + "}"; - //@formatter:on - final DocumentCreateEntity createResult = collection.insertDocument(json); - final BaseDocument doc = collection.getDocument(createResult.getKey(), BaseDocument.class); - assertThat(doc, is(notNullValue())); - final Object article = 
doc.getAttribute("article"); - assertThat(article, is(notNullValue())); - final Object artist = ((Map) article).get("artist"); - assertThat(artist, is(notNullValue())); - assertThat(artist.toString(), is("PREGARDIEN/RHEINISCHE KANTOREI/DAS")); - } - - @SuppressWarnings("unchecked") - @Test - public void insertAsBaseDocument() { - final BaseDocument document = new BaseDocument(); - { - final BaseDocument article = new BaseDocument(); - document.addAttribute("article", article); - article.addAttribute("artist", "PREGARDIEN/RHEINISCHE KANTOREI/DAS"); - article.addAttribute("releaseDate", "1970-01-01"); - article.addAttribute("composer", "BACH"); - article.addAttribute("format", "CD"); - article.addAttribute("vat", "H"); - article.addAttribute("carriers", 1); - article.addAttribute("label", "CAPRICCIO"); - article.addAttribute("title", "BACH ST MATTHEW PASSION BWV244"); - article.addAttribute("barcode", new String[] { "4006408600466" }); - article.addAttribute("conductor", "MAX, H."); - final BaseDocument stock = new BaseDocument(); - document.addAttribute("stock", stock); - stock.addAttribute("status", "RMV"); - stock.addAttribute("lastUpdate", "2016-11-01 00:00"); - } - final DocumentCreateEntity createResult = collection.insertDocument(document); - final BaseDocument doc = collection.getDocument(createResult.getKey(), BaseDocument.class); - assertThat(doc, is(notNullValue())); - final Object article = doc.getAttribute("article"); - assertThat(article, is(notNullValue())); - final Object artist = ((Map) article).get("artist"); - assertThat(artist, is(notNullValue())); - assertThat(artist.toString(), is("PREGARDIEN/RHEINISCHE KANTOREI/DAS")); - } - -} diff --git a/src/test/java/com/arangodb/UserAuthTest.java b/src/test/java/com/arangodb/UserAuthTest.java deleted file mode 100644 index 7d068ed8b..000000000 --- a/src/test/java/com/arangodb/UserAuthTest.java +++ /dev/null @@ -1,908 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2017 ArangoDB GmbH, Cologne, Germany - * - * 
Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb; - -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; - -import org.junit.AfterClass; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.entity.IndexEntity; -import com.arangodb.entity.Permissions; -import com.arangodb.model.AqlQueryOptions; -import com.arangodb.model.CollectionPropertiesOptions; -import com.arangodb.model.HashIndexOptions; -import com.arangodb.model.UserUpdateOptions; -import com.arangodb.util.MapBuilder; - -/** - * @author Mark Vollmary - * - */ -@RunWith(Parameterized.class) -@Ignore -public class UserAuthTest { - - private static final String DB_NAME = "AuthUnitTestDB"; - private static final String DB_NAME_NEW = DB_NAME + "new"; - private static final String COLLECTION_NAME = "AuthUnitTestCollection"; - private static final String 
COLLECTION_NAME_NEW = COLLECTION_NAME + "new"; - private static final String USER_NAME = "AuthUnitTestUser"; - private static final String USER_NAME_NEW = USER_NAME + "new"; - - public static class UserAuthParam { - Protocol protocol; - Permissions systemPermission; - Permissions dbPermission; - Permissions colPermission; - - public UserAuthParam(final Protocol protocol, final Permissions systemPermission, - final Permissions dbPermission, final Permissions colPermission) { - super(); - this.protocol = protocol; - this.systemPermission = systemPermission; - this.dbPermission = dbPermission; - this.colPermission = colPermission; - } - - } - - @Parameters - public static Collection params() { - final Collection params = new ArrayList(); - final Permissions[] permissions = new Permissions[] { Permissions.RW, Permissions.RO, Permissions.NONE }; - for (final Protocol protocol : new Protocol[] { Protocol.VST, Protocol.HTTP_JSON, Protocol.HTTP_VPACK }) { - for (final Permissions systemPermission : permissions) { - for (final Permissions dbPermission : permissions) { - for (final Permissions colPermission : permissions) { - params.add(new UserAuthParam(protocol, systemPermission, dbPermission, colPermission)); - } - } - } - } - return params; - } - - private static ArangoDB arangoDB; - private static ArangoDB arangoDBRoot; - private final UserAuthParam param; - private final String details; - - public UserAuthTest(final UserAuthParam param) { - super(); - this.param = param; - if (arangoDB != null || arangoDBRoot != null) { - shutdown(); - } - arangoDBRoot = new ArangoDB.Builder().useProtocol(param.protocol).build(); - arangoDBRoot.createUser(USER_NAME, ""); - arangoDB = new ArangoDB.Builder().useProtocol(param.protocol).user(USER_NAME).build(); - arangoDBRoot.createDatabase(DB_NAME); - arangoDBRoot.db(DB_NAME).createCollection(COLLECTION_NAME); - arangoDBRoot.db().grantAccess(USER_NAME, param.systemPermission); - arangoDBRoot.db(DB_NAME).grantAccess(USER_NAME, 
param.dbPermission); - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).grantAccess(USER_NAME, param.colPermission); - details = new StringBuffer().append(param.protocol).append("_").append(param.systemPermission).append("_") - .append(param.dbPermission).append("_").append(param.colPermission).toString(); - } - - @AfterClass - public static void shutdown() { - arangoDBRoot.deleteUser(USER_NAME); - try { - arangoDBRoot.db(DB_NAME).drop(); - } catch (final ArangoDBException e) { - } - if (arangoDB != null) { - arangoDB.shutdown(); - } - arangoDBRoot.shutdown(); - arangoDB = null; - arangoDBRoot = null; - } - - @Test - public void createDatabase() { - try { - if (Permissions.RW.equals(param.systemPermission)) { - try { - assertThat(details, arangoDB.createDatabase(DB_NAME_NEW), is(true)); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.getDatabases(), hasItem(DB_NAME_NEW)); - } else { - try { - arangoDB.createDatabase(DB_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.getDatabases(), not(hasItem(DB_NAME_NEW))); - } - } finally { - try { - arangoDBRoot.db(DB_NAME_NEW).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void dropDatabase() { - try { - arangoDBRoot.createDatabase(DB_NAME_NEW); - if (Permissions.RW.equals(param.systemPermission)) { - try { - assertThat(details, arangoDB.db(DB_NAME).drop(), is(true)); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.getDatabases(), not(hasItem(DB_NAME))); - } else { - try { - arangoDB.db(DB_NAME).drop(); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.getDatabases(), hasItem(DB_NAME)); - } - } finally { - try { - arangoDBRoot.db(DB_NAME_NEW).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void createUser() { - try { - if 
(Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.createUser(USER_NAME_NEW, ""); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.getUsers(), is(notNullValue())); - } else { - try { - arangoDB.createUser(USER_NAME_NEW, ""); - fail(details); - } catch (final ArangoDBException e) { - } - try { - arangoDBRoot.getUser(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void deleteUser() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - fail(details); - } - try { - arangoDBRoot.getUser(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - } else { - try { - arangoDB.deleteUser(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.getUsers(), is(notNullValue())); - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void updateUser() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.updateUser(USER_NAME_NEW, new UserUpdateOptions().active(false)); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.getUser(USER_NAME_NEW).getActive(), is(false)); - } else { - try { - arangoDB.updateUser(USER_NAME_NEW, new UserUpdateOptions().active(false)); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.getUser(USER_NAME_NEW).getActive(), is(true)); - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - 
public void grantUserDBAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.db().grantAccess(USER_NAME_NEW); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db().grantAccess(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void resetUserDBAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - arangoDBRoot.db().grantAccess(USER_NAME_NEW); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.db(DB_NAME).resetAccess(USER_NAME_NEW); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).resetAccess(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void grantUserCollcetionAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).grantAccess(USER_NAME_NEW, Permissions.RW); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).grantAccess(USER_NAME_NEW, Permissions.RW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void resetUserCollectionAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - arangoDBRoot.db().grantAccess(USER_NAME_NEW); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).resetAccess(USER_NAME_NEW); - } 
catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).resetAccess(USER_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void updateUserDefaultDatabaseAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - arangoDBRoot.db().grantAccess(USER_NAME_NEW); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.grantDefaultDatabaseAccess(USER_NAME_NEW, Permissions.RW); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.grantDefaultDatabaseAccess(USER_NAME_NEW, Permissions.RW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void updateUserDefaultCollectionAccess() { - try { - arangoDBRoot.createUser(USER_NAME_NEW, ""); - arangoDBRoot.db().grantAccess(USER_NAME_NEW); - if (Permissions.RW.equals(param.systemPermission)) { - try { - arangoDB.grantDefaultCollectionAccess(USER_NAME_NEW, Permissions.RW); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.grantDefaultCollectionAccess(USER_NAME_NEW, Permissions.RW); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.deleteUser(USER_NAME_NEW); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void createCollection() { - try { - if (Permissions.RW.equals(param.dbPermission)) { - try { - arangoDB.db(DB_NAME).createCollection(COLLECTION_NAME_NEW); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).getInfo(), - is(notNullValue())); - } else { - try { - 
arangoDB.db(DB_NAME).createCollection(COLLECTION_NAME_NEW); - fail(details); - } catch (final ArangoDBException e) { - } - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).getInfo(); - fail(details); - } catch (final ArangoDBException e) { - } - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void dropCollection() { - try { - arangoDBRoot.db(DB_NAME).createCollection(COLLECTION_NAME_NEW); - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).grantAccess(USER_NAME, param.colPermission); - if (Permissions.RW.equals(param.dbPermission) && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME_NEW).drop(); - } catch (final ArangoDBException e) { - fail(details); - } - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).getInfo(); - fail(details); - } catch (final ArangoDBException e) { - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME_NEW).drop(); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).getInfo(), - is(notNullValue())); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME_NEW).drop(); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void seeCollection() { - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - try { - final Collection collections = arangoDB.db(DB_NAME).getCollections(); - boolean found = false; - for (final CollectionEntity collection : collections) { - if (collection.getName().equals(COLLECTION_NAME)) { - found = true; - break; - } - } - assertThat(details, found, is(true)); - } catch (final ArangoDBException e) { - fail(details); - } - } else if 
(Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) { - final Collection collections = arangoDB.db(DB_NAME).getCollections(); - boolean found = false; - for (final CollectionEntity collection : collections) { - if (collection.getName().equals(COLLECTION_NAME)) { - found = true; - break; - } - } - assertThat(details, found, is(false)); - } - } - - @Test - public void readCollectionInfo() { - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - try { - assertThat(details, arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getInfo(), is(notNullValue())); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getInfo(); - fail(details); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void readCollectionProperties() { - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - try { - assertThat(details, arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getProperties(), - is(notNullValue())); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getProperties(); - fail(details); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void writeCollectionProperties() { - if (Permissions.RW.equals(param.dbPermission) && Permissions.RW.equals(param.colPermission)) { - try { - assertThat(details, arangoDB.db(DB_NAME).collection(COLLECTION_NAME) - .changeProperties(new CollectionPropertiesOptions().waitForSync(true)), - is(notNullValue())); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, 
arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getProperties().getWaitForSync(), - is(true)); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME) - .changeProperties(new CollectionPropertiesOptions().waitForSync(true)); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getProperties().getWaitForSync(), - is(false)); - } - } - - @Test - public void readCollectionIndexes() { - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - try { - assertThat(details, arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getIndexes(), is(notNullValue())); - } catch (final ArangoDBException e) { - fail(details); - } - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getIndexes(); - fail(details); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void createCollectionIndex() { - String id = null; - try { - if (Permissions.RW.equals(param.dbPermission) && Permissions.RW.equals(param.colPermission)) { - try { - final IndexEntity createHashIndex = arangoDB.db(DB_NAME).collection(COLLECTION_NAME) - .ensureHashIndex(Arrays.asList("a"), new HashIndexOptions()); - assertThat(details, createHashIndex, is(notNullValue())); - id = createHashIndex.getId(); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getIndexes().size(), is(2)); - } else { - try { - final IndexEntity createHashIndex = arangoDB.db(DB_NAME).collection(COLLECTION_NAME) - .ensureHashIndex(Arrays.asList("a"), new HashIndexOptions()); - id = createHashIndex.getId(); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getIndexes().size(), is(1)); - } - } finally { - if (id != 
null) { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteIndex(id); - } - } - } - - @Test - public void dropCollectionIndex() { - final String id = arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME) - .ensureHashIndex(Arrays.asList("a"), new HashIndexOptions()).getId(); - try { - if (Permissions.RW.equals(param.dbPermission) && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).deleteIndex(id); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getIndexes().size(), is(1)); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).deleteIndex(id); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).getIndexes().size(), is(2)); - } - } finally { - if (id != null) { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteIndex(id); - } catch (final ArangoDBException e) { - } - } - } - } - - @Test - public void truncateCollection() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).truncate(); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(false)); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).truncate(); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(true)); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).truncate(); - } catch (final ArangoDBException e) { - } - } - } - 
- @Test - public void readDocumentByKey() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - assertThat(details, - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getDocument("123", BaseDocument.class), - is(notNullValue())); - } else { - assertThat(details, - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getDocument("123", BaseDocument.class), - is(nullValue())); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void readDocumentByAql() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && (Permissions.RW.equals(param.colPermission) || Permissions.RO.equals(param.colPermission))) { - assertThat(details, - arangoDB.db(DB_NAME).query("FOR i IN @@col RETURN i", - new MapBuilder().put("@col", COLLECTION_NAME).get(), new AqlQueryOptions(), BaseDocument.class) - .asListRemaining().size(), - is(1)); - } else { - assertThat(details, - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).getDocument("123", BaseDocument.class), - is(nullValue())); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void insertDocument() { - try { - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - } catch (final ArangoDBException e) { - fail(details); - } - 
assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(true)); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(false)); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void updateDocumentByKey() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).updateDocument("123", - new BaseDocument(new MapBuilder().put("test", "test").get())); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME) - .getDocument("123", BaseDocument.class).getAttribute("test").toString(), - is("test")); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).updateDocument("123", - new BaseDocument(new MapBuilder().put("test", "test").get())); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME) - .getDocument("123", BaseDocument.class).getAttribute("test"), - is(nullValue())); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void updateDocumentByAql() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) 
- && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).query("FOR i IN @@col UPDATE i WITH @newDoc IN @@col", - new MapBuilder().put("@col", COLLECTION_NAME) - .put("newDoc", new BaseDocument(new MapBuilder().put("test", "test").get())).get(), - new AqlQueryOptions(), Void.class); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME) - .getDocument("123", BaseDocument.class).getAttribute("test").toString(), - is("test")); - } else { - try { - arangoDB.db(DB_NAME).query("FOR i IN @@col UPDATE i WITH @newDoc IN @@col", - new MapBuilder().put("@col", COLLECTION_NAME) - .put("newDoc", new BaseDocument(new MapBuilder().put("test", "test").get())).get(), - new AqlQueryOptions(), Void.class); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME) - .getDocument("123", BaseDocument.class).getAttribute("test"), - is(nullValue())); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void deleteDocumentByKey() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(false)); - } else { - try { - arangoDB.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(true)); - 
} - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - - @Test - public void deleteDocumentByAql() { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).insertDocument(new BaseDocument("123")); - if ((Permissions.RW.equals(param.dbPermission) || Permissions.RO.equals(param.dbPermission)) - && Permissions.RW.equals(param.colPermission)) { - try { - arangoDB.db(DB_NAME).query("REMOVE @key IN @@col", - new MapBuilder().put("key", "123").put("@col", COLLECTION_NAME).get(), new AqlQueryOptions(), - Void.class); - } catch (final ArangoDBException e) { - fail(details); - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(false)); - } else { - try { - arangoDB.db(DB_NAME).query("REMOVE @key IN @@col", - new MapBuilder().put("key", "123").put("@col", COLLECTION_NAME).get(), new AqlQueryOptions(), - Void.class); - fail(details); - } catch (final ArangoDBException e) { - } - assertThat(details, arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).documentExists("123"), - is(true)); - } - } finally { - try { - arangoDBRoot.db(DB_NAME).collection(COLLECTION_NAME).deleteDocument("123"); - } catch (final ArangoDBException e) { - } - } - } - -} diff --git a/src/test/java/com/arangodb/entity/BaseDocumentTest.java b/src/test/java/com/arangodb/entity/BaseDocumentTest.java deleted file mode 100644 index f71c9e565..000000000 --- a/src/test/java/com/arangodb/entity/BaseDocumentTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.entity; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import org.junit.Test; - -import com.arangodb.internal.velocypack.VPackDriverModule; -import com.arangodb.velocypack.VPack; -import com.arangodb.velocypack.VPack.Builder; -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -public class BaseDocumentTest { - - @Test - public void serialize() throws VPackException { - final BaseDocument entity = new BaseDocument(); - entity.setKey("test"); - entity.setRevision("test"); - entity.addAttribute("a", "a"); - - final Builder builder = new VPack.Builder(); - builder.registerModule(new VPackDriverModule()); - final VPack vpacker = builder.build(); - - final VPackSlice vpack = vpacker.serialize(entity); - assertThat(vpack, is(notNullValue())); - assertThat(vpack.isObject(), is(true)); - assertThat(vpack.size(), is(3)); - - final VPackSlice key = vpack.get("_key"); - assertThat(key.isString(), is(true)); - assertThat(key.getAsString(), is("test")); - - final VPackSlice rev = vpack.get("_rev"); - assertThat(rev.isString(), is(true)); - assertThat(rev.getAsString(), is("test")); - - final VPackSlice a = vpack.get("a"); - assertThat(a.isString(), is(true)); - assertThat(a.getAsString(), is("a")); - } - 
- @Test - public void deserialize() throws VPackException { - final VPackBuilder builder = new VPackBuilder(); - builder.add(ValueType.OBJECT); - builder.add("_id", "test/test"); - builder.add("_key", "test"); - builder.add("_rev", "test"); - builder.add("a", "a"); - builder.close(); - - final VPack.Builder vbuilder = new VPack.Builder(); - vbuilder.registerModule(new VPackDriverModule()); - final VPack vpacker = vbuilder.build(); - - final BaseDocument entity = vpacker.deserialize(builder.slice(), BaseDocument.class); - assertThat(entity.getId(), is(notNullValue())); - assertThat(entity.getId(), is("test/test")); - assertThat(entity.getKey(), is(notNullValue())); - assertThat(entity.getKey(), is("test")); - assertThat(entity.getRevision(), is(notNullValue())); - assertThat(entity.getRevision(), is("test")); - assertThat(entity.getProperties().size(), is(1)); - assertThat(String.valueOf(entity.getAttribute("a")), is("a")); - } - -} diff --git a/src/test/java/com/arangodb/example/ExampleBase.java b/src/test/java/com/arangodb/example/ExampleBase.java deleted file mode 100644 index f3bb8cb76..000000000 --- a/src/test/java/com/arangodb/example/ExampleBase.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import com.arangodb.ArangoCollection; -import com.arangodb.ArangoDB; -import com.arangodb.ArangoDBException; -import com.arangodb.ArangoDatabase; - -/** - * @author Mark Vollmary - * - */ -public class ExampleBase { - - protected static final String DB_NAME = "json_example_db"; - protected static final String COLLECTION_NAME = "json_example_collection"; - - protected static ArangoDB arangoDB; - protected static ArangoDatabase db; - protected static ArangoCollection collection; - - @BeforeClass - public static void setUp() { - arangoDB = new ArangoDB.Builder().build(); - try { - arangoDB.db(DB_NAME).drop(); - } catch (final ArangoDBException e) { - } - arangoDB.createDatabase(DB_NAME); - db = arangoDB.db(DB_NAME); - db.createCollection(COLLECTION_NAME); - collection = db.collection(COLLECTION_NAME); - } - - @AfterClass - public static void tearDown() { - db.drop(); - arangoDB.shutdown(); - } - -} diff --git a/src/test/java/com/arangodb/example/FirstProject.java b/src/test/java/com/arangodb/example/FirstProject.java deleted file mode 100644 index dd12053b6..000000000 --- a/src/test/java/com/arangodb/example/FirstProject.java +++ /dev/null @@ -1,136 +0,0 @@ -package com.arangodb.example; - -import java.util.Map; - -import com.arangodb.ArangoCollection; -import com.arangodb.ArangoCursor; -import com.arangodb.ArangoDB; -import com.arangodb.ArangoDBException; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.CollectionEntity; -import com.arangodb.util.MapBuilder; -import com.arangodb.velocypack.VPackSlice; - -public class FirstProject { - - public static void main(final String[] args) { - final ArangoDB arangoDB = new ArangoDB.Builder().user("root").build(); - - // create database - final String dbName = "mydb"; - try { - arangoDB.createDatabase(dbName); - System.out.println("Database 
created: " + dbName); - } catch (final ArangoDBException e) { - System.err.println("Failed to create database: " + dbName + "; " + e.getMessage()); - } - - // create collection - final String collectionName = "firstCollection"; - try { - final CollectionEntity myArangoCollection = arangoDB.db(dbName).createCollection(collectionName); - System.out.println("Collection created: " + myArangoCollection.getName()); - } catch (final ArangoDBException e) { - System.err.println("Failed to create collection: " + collectionName + "; " + e.getMessage()); - } - - // creating a document - final BaseDocument myObject = new BaseDocument(); - myObject.setKey("myKey"); - myObject.addAttribute("a", "Foo"); - myObject.addAttribute("b", 42); - try { - arangoDB.db(dbName).collection(collectionName).insertDocument(myObject); - System.out.println("Document created"); - } catch (final ArangoDBException e) { - System.err.println("Failed to create document. " + e.getMessage()); - } - - // read a document - try { - final BaseDocument myDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", - BaseDocument.class); - System.out.println("Key: " + myDocument.getKey()); - System.out.println("Attribute a: " + myDocument.getAttribute("a")); - System.out.println("Attribute b: " + myDocument.getAttribute("b")); - } catch (final ArangoDBException e) { - System.err.println("Failed to get document: myKey; " + e.getMessage()); - } - - // read a document as VPack - try { - final VPackSlice myDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", - VPackSlice.class); - System.out.println("Key: " + myDocument.get("_key").getAsString()); - System.out.println("Attribute a: " + myDocument.get("a").getAsString()); - System.out.println("Attribute b: " + myDocument.get("b").getAsInt()); - } catch (final ArangoDBException e) { - System.err.println("Failed to get document: myKey; " + e.getMessage()); - } - - // update a document - myObject.addAttribute("c", "Bar"); - 
try { - arangoDB.db(dbName).collection(collectionName).updateDocument("myKey", myObject); - } catch (final ArangoDBException e) { - System.err.println("Failed to update document. " + e.getMessage()); - } - - // read the document again - try { - final BaseDocument myUpdatedDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", - BaseDocument.class); - System.out.println("Key: " + myUpdatedDocument.getKey()); - System.out.println("Attribute a: " + myUpdatedDocument.getAttribute("a")); - System.out.println("Attribute b: " + myUpdatedDocument.getAttribute("b")); - System.out.println("Attribute c: " + myUpdatedDocument.getAttribute("c")); - } catch (final ArangoDBException e) { - System.err.println("Failed to get document: myKey; " + e.getMessage()); - } - - // delete a document - try { - arangoDB.db(dbName).collection(collectionName).deleteDocument("myKey"); - } catch (final ArangoDBException e) { - System.err.println("Failed to delete document. " + e.getMessage()); - } - - // create some documents for the next step - final ArangoCollection collection = arangoDB.db(dbName).collection(collectionName); - for (int i = 0; i < 10; i++) { - final BaseDocument value = new BaseDocument(); - value.setKey(String.valueOf(i)); - value.addAttribute("name", "Homer"); - collection.insertDocument(value); - } - - // execute AQL queries - try { - final String query = "FOR t IN firstCollection FILTER t.name == @name RETURN t"; - final Map bindVars = new MapBuilder().put("name", "Homer").get(); - final ArangoCursor cursor = arangoDB.db(dbName).query(query, bindVars, null, - BaseDocument.class); - for (; cursor.hasNext();) { - System.out.println("Key: " + cursor.next().getKey()); - } - } catch (final ArangoDBException e) { - System.err.println("Failed to execute query. 
" + e.getMessage()); - } - - // delete a document with AQL - try { - final String query = "FOR t IN firstCollection FILTER t.name == @name " - + "REMOVE t IN firstCollection LET removed = OLD RETURN removed"; - final Map bindVars = new MapBuilder().put("name", "Homer").get(); - final ArangoCursor cursor = arangoDB.db(dbName).query(query, bindVars, null, - BaseDocument.class); - for (; cursor.hasNext();) { - System.out.println("Removed document " + cursor.next().getKey()); - } - } catch (final ArangoDBException e) { - System.err.println("Failed to execute query. " + e.getMessage()); - } - - } - -} diff --git a/src/test/java/com/arangodb/example/document/AqlQueryWithSpecialReturnTypesExample.java b/src/test/java/com/arangodb/example/document/AqlQueryWithSpecialReturnTypesExample.java deleted file mode 100644 index 331a4226f..000000000 --- a/src/test/java/com/arangodb/example/document/AqlQueryWithSpecialReturnTypesExample.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.document; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.isOneOf; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.util.List; -import java.util.Map; - -import org.junit.BeforeClass; -import org.junit.Test; - -import com.arangodb.ArangoCursor; -import com.arangodb.entity.BaseDocument; -import com.arangodb.example.ExampleBase; -import com.arangodb.util.MapBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -public class AqlQueryWithSpecialReturnTypesExample extends ExampleBase { - - @BeforeClass - public static void before() { - createExamples(); - } - - public enum Gender { - MALE, FEMALE - } - - private static void createExamples() { - for (int i = 0; i < 100; i++) { - final BaseDocument value = new BaseDocument(); - value.addAttribute("name", "TestUser" + i); - value.addAttribute("gender", (i % 2) == 0 ? 
Gender.MALE : Gender.FEMALE); - value.addAttribute("age", i + 10); - db.collection(COLLECTION_NAME).insertDocument(value); - } - } - - @Test - public void aqlWithLimitQueryAsVPackObject() { - final String query = "FOR t IN " + COLLECTION_NAME - + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN t"; - final Map bindVars = new MapBuilder().put("gender", Gender.FEMALE).get(); - final ArangoCursor cursor = db.query(query, bindVars, null, VPackSlice.class); - assertThat(cursor, is(notNullValue())); - for (; cursor.hasNext();) { - final VPackSlice vpack = cursor.next(); - try { - assertThat(vpack.get("name").getAsString(), - isOneOf("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19")); - assertThat(vpack.get("gender").getAsString(), is(Gender.FEMALE.name())); - assertThat(vpack.get("age").getAsInt(), isOneOf(21, 23, 25, 27, 29)); - } catch (final VPackException e) { - } - } - } - - @Test - public void aqlWithLimitQueryAsVPackArray() { - final String query = "FOR t IN " + COLLECTION_NAME - + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN [t.name, t.gender, t.age]"; - final Map bindVars = new MapBuilder().put("gender", Gender.FEMALE).get(); - final ArangoCursor cursor = db.query(query, bindVars, null, VPackSlice.class); - assertThat(cursor, is(notNullValue())); - for (; cursor.hasNext();) { - final VPackSlice vpack = cursor.next(); - assertThat(vpack.get(0).getAsString(), - isOneOf("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19")); - assertThat(vpack.get(1).getAsString(), is(Gender.FEMALE.name())); - assertThat(vpack.get(2).getAsInt(), isOneOf(21, 23, 25, 27, 29)); - } - } - - @Test - @SuppressWarnings("rawtypes") - public void aqlWithLimitQueryAsMap() { - final String query = "FOR t IN " + COLLECTION_NAME - + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN t"; - final Map bindVars = new MapBuilder().put("gender", Gender.FEMALE).get(); - final ArangoCursor cursor = db.query(query, 
bindVars, null, Map.class); - assertThat(cursor, is(notNullValue())); - for (; cursor.hasNext();) { - final Map map = cursor.next(); - assertThat(map.get("name"), is(notNullValue())); - assertThat(String.valueOf(map.get("name")), - isOneOf("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19")); - assertThat(map.get("gender"), is(notNullValue())); - assertThat(String.valueOf(map.get("gender")), is(Gender.FEMALE.name())); - assertThat(map.get("age"), is(notNullValue())); - assertThat(Long.valueOf(map.get("age").toString()), isOneOf(21L, 23L, 25L, 27L, 29L)); - } - } - - @Test - @SuppressWarnings("rawtypes") - public void aqlWithLimitQueryAsList() { - final String query = "FOR t IN " + COLLECTION_NAME - + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN [t.name, t.gender, t.age]"; - final Map bindVars = new MapBuilder().put("gender", Gender.FEMALE).get(); - final ArangoCursor cursor = db.query(query, bindVars, null, List.class); - assertThat(cursor, is(notNullValue())); - for (; cursor.hasNext();) { - final List list = cursor.next(); - assertThat(list.get(0), is(notNullValue())); - assertThat(String.valueOf(list.get(0)), - isOneOf("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19")); - assertThat(list.get(1), is(notNullValue())); - assertThat(Gender.valueOf(String.valueOf(list.get(1))), is(Gender.FEMALE)); - assertThat(list.get(2), is(notNullValue())); - assertThat(Long.valueOf(String.valueOf(list.get(2))), isOneOf(21L, 23L, 25L, 27L, 29L)); - } - } -} diff --git a/src/test/java/com/arangodb/example/document/GetDocumentExample.java b/src/test/java/com/arangodb/example/document/GetDocumentExample.java deleted file mode 100644 index d16da952d..000000000 --- a/src/test/java/com/arangodb/example/document/GetDocumentExample.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use 
this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.document; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.util.Map; - -import org.json.simple.parser.ParseException; -import org.junit.BeforeClass; -import org.junit.Test; - -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.example.ExampleBase; -import com.arangodb.velocypack.VPackSlice; - -/** - * @author Mark Vollmary - * - */ -public class GetDocumentExample extends ExampleBase { - - private static String key = null; - - @BeforeClass - public static void before() { - final BaseDocument value = new BaseDocument(); - value.addAttribute("foo", "bar"); - final DocumentCreateEntity doc = collection.insertDocument(value); - key = doc.getKey(); - } - - @Test - public void getAsBean() { - final TestEntity doc = collection.getDocument(key, TestEntity.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getFoo(), is("bar")); - } - - @Test - public void getAsBaseDocument() { - final BaseDocument doc = collection.getDocument(key, BaseDocument.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.getAttribute("foo"), is(notNullValue())); - assertThat(String.valueOf(doc.getAttribute("foo")), is("bar")); - } - - @SuppressWarnings("unchecked") - @Test - public void getAsMap() { - final Map doc = collection.getDocument(key, 
Map.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.get("foo"), is(notNullValue())); - assertThat(String.valueOf(doc.get("foo")), is("bar")); - } - - @Test - public void getAsVPack() { - final VPackSlice doc = collection.getDocument(key, VPackSlice.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.get("foo").isString(), is(true)); - assertThat(doc.get("foo").getAsString(), is("bar")); - } - - @Test - public void getAsJson() throws ParseException { - final String doc = collection.getDocument(key, String.class); - assertThat(doc, is(notNullValue())); - assertThat(doc.contains("foo"), is(true)); - assertThat(doc.contains("bar"), is(true)); - } - -} diff --git a/src/test/java/com/arangodb/example/document/InsertDocumentExample.java b/src/test/java/com/arangodb/example/document/InsertDocumentExample.java deleted file mode 100644 index de82bcb33..000000000 --- a/src/test/java/com/arangodb/example/document/InsertDocumentExample.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.document; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import org.junit.Test; - -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.example.ExampleBase; -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; - -/** - * @author Mark Vollmary - * - */ -public class InsertDocumentExample extends ExampleBase { - - @Test - public void insertBean() { - final DocumentCreateEntity doc = collection.insertDocument(new TestEntity("bar")); - assertThat(doc.getKey(), is(notNullValue())); - } - - @Test - public void insertBaseDocument() { - final BaseDocument value = new BaseDocument(); - value.addAttribute("foo", "bar"); - final DocumentCreateEntity doc = collection.insertDocument(value); - assertThat(doc.getKey(), is(notNullValue())); - } - - @Test - public void insertVPack() { - final VPackBuilder builder = new VPackBuilder(); - builder.add(ValueType.OBJECT).add("foo", "bar").close(); - final DocumentCreateEntity doc = collection.insertDocument(builder.slice()); - assertThat(doc.getKey(), is(notNullValue())); - } - - @Test - public void insertJson() { - final DocumentCreateEntity doc = collection.insertDocument("{\"foo\":\"bar\"}"); - assertThat(doc.getKey(), is(notNullValue())); - } - -} diff --git a/src/test/java/com/arangodb/example/graph/AQLActorsAndMoviesExample.java b/src/test/java/com/arangodb/example/graph/AQLActorsAndMoviesExample.java deleted file mode 100644 index 79ef27823..000000000 --- a/src/test/java/com/arangodb/example/graph/AQLActorsAndMoviesExample.java +++ /dev/null @@ -1,573 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * 
you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import static org.hamcrest.Matchers.hasItems; -import static org.junit.Assert.assertThat; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import com.arangodb.ArangoCollection; -import com.arangodb.ArangoCursor; -import com.arangodb.ArangoDB; -import com.arangodb.ArangoDBException; -import com.arangodb.ArangoDatabase; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; -import com.arangodb.entity.CollectionType; -import com.arangodb.entity.DocumentCreateEntity; -import com.arangodb.model.CollectionCreateOptions; - -/** - * @author Mark Vollmary - * - * @see AQL Example Queries on an - * Actors and Movies Database - * - */ -public class AQLActorsAndMoviesExample { - - private static final String TEST_DB = "actors_movies_test_db"; - private static ArangoDB arangoDB; - private static ArangoDatabase db; - - @BeforeClass - public static void setUp() { - arangoDB = new ArangoDB.Builder().build(); - try { - arangoDB.db(TEST_DB).drop(); - } catch (final ArangoDBException e) { - } - arangoDB.createDatabase(TEST_DB); - db = arangoDB.db(TEST_DB); - createData(); - } - - @AfterClass - public static void tearDown() { - db.drop(); - arangoDB.shutdown(); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allActorsActsInMovie1or2() { - final ArangoCursor cursor = 
db.query( - "WITH actors FOR x IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN x._id", - null, null, String.class); - assertThat(cursor.asListRemaining(), - hasItems("actors/Keanu", "actors/Hugo", "actors/Emil", "actors/Carrie", "actors/Laurence")); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allActorsActsInMovie1or2UnionDistinct() { - final ArangoCursor cursor = db.query( - "WITH actors FOR x IN UNION_DISTINCT ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", - null, null, String.class); - assertThat(cursor.asListRemaining(), hasItems("actors/Emil", "actors/Hugo", "actors/Carrie", "actors/Laurence", - "actors/Keanu", "actors/Al", "actors/Charlize")); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allActorsActsInMovie1and2() { - final ArangoCursor cursor = db.query( - "WITH actors FOR x IN INTERSECTION ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", - null, null, String.class); - assertThat(cursor.asListRemaining(), hasItems("actors/Keanu")); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allMoviesBetweenActor1andActor2() { - final ArangoCursor cursor = db.query( - "WITH movies FOR x IN INTERSECTION ((FOR y IN ANY 'actors/Hugo' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'actors/Keanu' actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", - null, null, String.class); - assertThat(cursor.asListRemaining(), - hasItems("movies/TheMatrixRevolutions", 
"movies/TheMatrixReloaded", "movies/TheMatrix")); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allActorsWhoActedIn3orMoreMovies() { - final ArangoCursor cursor = db.query( - "FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter FILTER counter >= 3 RETURN {actor: actor, movies: counter}", - null, null, Actor.class); - assertThat(cursor.asListRemaining(), - hasItems(new Actor("actors/Carrie", 3), new Actor("actors/CubaG", 4), new Actor("actors/Hugo", 3), - new Actor("actors/Keanu", 4), new Actor("actors/Laurence", 3), new Actor("actors/MegR", 5), - new Actor("actors/TomC", 3), new Actor("actors/TomH", 3))); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void allMoviesWhereExactly6ActorsActedIn() { - final ArangoCursor cursor = db.query( - "FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter FILTER counter == 6 RETURN movie", null, - null, String.class); - assertThat(cursor.asListRemaining(), - hasItems("movies/SleeplessInSeattle", "movies/TopGun", "movies/YouveGotMail")); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void theNumberOfActorsByMovie() { - final ArangoCursor cursor = db.query( - "FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter RETURN {movie: movie, actors: counter}", - null, null, Movie.class); - assertThat(cursor.asListRemaining(), - hasItems(new Movie("movies/AFewGoodMen", 11), new Movie("movies/AsGoodAsItGets", 4), - new Movie("movies/JerryMaguire", 9), new Movie("movies/JoeVersustheVolcano", 3), - new Movie("movies/SleeplessInSeattle", 6), new Movie("movies/SnowFallingonCedars", 4), - new Movie("movies/StandByMe", 7), new Movie("movies/TheDevilsAdvocate", 3), - new Movie("movies/TheMatrix", 5), new Movie("movies/TheMatrixReloaded", 4), - new Movie("movies/TheMatrixRevolutions", 4), new Movie("movies/TopGun", 6), - new Movie("movies/WhatDreamsMayCome", 
5), new Movie("movies/WhenHarryMetSally", 4), - new Movie("movies/YouveGotMail", 6))); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void theNumberOfMoviesByActor() { - final ArangoCursor cursor = db.query( - "FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}", - null, null, Actor.class); - assertThat(cursor.asListRemaining(), - hasItems(new Actor("actors/Al", 1), new Actor("actors/AnnabellaS", 1), new Actor("actors/AnthonyE", 1), - new Actor("actors/BillPull", 1), new Actor("actors/BillyC", 1), new Actor("actors/BonnieH", 1), - new Actor("actors/BrunoK", 1), new Actor("actors/Carrie", 3), new Actor("actors/CarrieF", 1), - new Actor("actors/Charlize", 1), new Actor("actors/ChristopherG", 1), new Actor("actors/CoreyF", 1), - new Actor("actors/CubaG", 4), new Actor("actors/DaveC", 1), new Actor("actors/DemiM", 1), - new Actor("actors/Emil", 1), new Actor("actors/EthanH", 1), new Actor("actors/GregK", 2), - new Actor("actors/HelenH", 1), new Actor("actors/Hugo", 3), new Actor("actors/JackN", 2), - new Actor("actors/JamesC", 1), new Actor("actors/JamesM", 1), new Actor("actors/JayM", 1), - new Actor("actors/JerryO", 2), new Actor("actors/JohnC", 1), new Actor("actors/JonathanL", 1), - new Actor("actors/JTW", 1), new Actor("actors/Keanu", 4), new Actor("actors/KellyM", 1), - new Actor("actors/KellyP", 1), new Actor("actors/KevinB", 1), new Actor("actors/KevinP", 1), - new Actor("actors/KieferS", 2), new Actor("actors/Laurence", 3), new Actor("actors/MarshallB", 1), - new Actor("actors/MaxS", 2), new Actor("actors/MegR", 5), new Actor("actors/Nathan", 1), - new Actor("actors/NoahW", 1), new Actor("actors/ParkerP", 1), new Actor("actors/ReginaK", 1), - new Actor("actors/ReneeZ", 1), new Actor("actors/RickY", 1), new Actor("actors/RitaW", 1), - new Actor("actors/RiverP", 1), new Actor("actors/Robin", 1), new Actor("actors/RosieO", 1), - new Actor("actors/SteveZ", 1), 
new Actor("actors/TomC", 3), new Actor("actors/TomH", 3), - new Actor("actors/TomS", 1), new Actor("actors/ValK", 1), new Actor("actors/VictorG", 1), - new Actor("actors/WernerH", 1), new Actor("actors/WilW", 1))); - } - - /** - * @see AQL - * Example Queries on an Actors and Movies Database - */ - @Test - public void theNumberOfMoviesActedInBetween2005and2010byActor() { - final ArangoCursor cursor = db.query( - "FOR x IN actsIn FILTER x.year >= 1990 && x.year <= 1995 COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: counter}", - null, null, Actor.class); - assertThat(cursor.asListRemaining(), - hasItems(new Actor("actors/BillPull", 1), new Actor("actors/ChristopherG", 1), new Actor("actors/CubaG", 1), - new Actor("actors/DemiM", 1), new Actor("actors/JackN", 1), new Actor("actors/JamesM", 1), - new Actor("actors/JTW", 1), new Actor("actors/KevinB", 1), new Actor("actors/KieferS", 1), - new Actor("actors/MegR", 2), new Actor("actors/Nathan", 1), new Actor("actors/NoahW", 1), - new Actor("actors/RitaW", 1), new Actor("actors/RosieO", 1), new Actor("actors/TomC", 1), - new Actor("actors/TomH", 2), new Actor("actors/VictorG", 1))); - } - - public static class Actor { - private String actor; - private Integer movies; - - public Actor() { - super(); - } - - public Actor(final String actor, final Integer movies) { - super(); - this.actor = actor; - this.movies = movies; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((actor == null) ? 0 : actor.hashCode()); - result = prime * result + ((movies == null) ? 
0 : movies.hashCode()); - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final Actor other = (Actor) obj; - if (actor == null) { - if (other.actor != null) { - return false; - } - } else if (!actor.equals(other.actor)) { - return false; - } - if (movies == null) { - if (other.movies != null) { - return false; - } - } else if (!movies.equals(other.movies)) { - return false; - } - return true; - } - - } - - public static class Movie { - private String movie; - private Integer actors; - - public Movie() { - super(); - } - - public Movie(final String movie, final Integer actors) { - super(); - this.movie = movie; - this.actors = actors; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((actors == null) ? 0 : actors.hashCode()); - result = prime * result + ((movie == null) ? 
0 : movie.hashCode()); - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final Movie other = (Movie) obj; - if (actors == null) { - if (other.actors != null) { - return false; - } - } else if (!actors.equals(other.actors)) { - return false; - } - if (movie == null) { - if (other.movie != null) { - return false; - } - } else if (!movie.equals(other.movie)) { - return false; - } - return true; - } - - } - - private static DocumentCreateEntity saveMovie( - final ArangoCollection movies, - final String key, - final String title, - final int released, - final String tagline) { - final BaseDocument value = new BaseDocument(); - value.setKey(key); - value.addAttribute("title", title); - value.addAttribute("released", released); - value.addAttribute("tagline", tagline); - return movies.insertDocument(value); - } - - private static DocumentCreateEntity saveActor( - final ArangoCollection actors, - final String key, - final String name, - final int born) { - final BaseDocument value = new BaseDocument(); - value.setKey(key); - value.addAttribute("name", name); - value.addAttribute("born", born); - return actors.insertDocument(value); - } - - private static DocumentCreateEntity saveActsIn( - final ArangoCollection actsIn, - final String actor, - final String movie, - final String[] roles, - final int year) { - final BaseEdgeDocument value = new BaseEdgeDocument(); - value.setFrom(actor); - value.setTo(movie); - value.addAttribute("roles", roles); - value.addAttribute("year", year); - return actsIn.insertDocument(value); - } - - private static void createData() { - db.createCollection("actors"); - final ArangoCollection actors = db.collection("actors"); - db.createCollection("movies"); - final ArangoCollection movies = db.collection("movies"); - db.createCollection("actsIn", new 
CollectionCreateOptions().type(CollectionType.EDGES)); - final ArangoCollection actsIn = db.collection("actsIn"); - - final String theMatrix = saveMovie(movies, "TheMatrix", "The Matrix", 1999, "Welcome to the Real World") - .getId(); - final String keanu = saveActor(actors, "Keanu", "Keanu Reeves", 1964).getId(); - final String carrie = saveActor(actors, "Carrie", "Carrie-Anne Moss", 1967).getId(); - final String laurence = saveActor(actors, "Laurence", "Laurence Fishburne", 1961).getId(); - final String hugo = saveActor(actors, "Hugo", "Hugo Weaving", 1960).getId(); - final String emil = saveActor(actors, "Emil", "Emil Eifrem", 1978).getId(); - - saveActsIn(actsIn, keanu, theMatrix, new String[] { "Neo" }, 1999); - saveActsIn(actsIn, carrie, theMatrix, new String[] { "Trinity" }, 1999); - saveActsIn(actsIn, laurence, theMatrix, new String[] { "Morpheus" }, 1999); - saveActsIn(actsIn, hugo, theMatrix, new String[] { "Agent Smith" }, 1999); - saveActsIn(actsIn, emil, theMatrix, new String[] { "Emil" }, 1999); - - final String theMatrixReloaded = saveMovie(movies, "TheMatrixReloaded", "The Matrix Reloaded", 2003, - "Free your mind").getId(); - saveActsIn(actsIn, keanu, theMatrixReloaded, new String[] { "Neo" }, 2003); - saveActsIn(actsIn, carrie, theMatrixReloaded, new String[] { "Trinity" }, 2003); - saveActsIn(actsIn, laurence, theMatrixReloaded, new String[] { "Morpheus" }, 2003); - saveActsIn(actsIn, hugo, theMatrixReloaded, new String[] { "Agent Smith" }, 2003); - - final String theMatrixRevolutions = saveMovie(movies, "TheMatrixRevolutions", "The Matrix Revolutions", 2003, - "Everything that has a beginning has an end").getId(); - saveActsIn(actsIn, keanu, theMatrixRevolutions, new String[] { "Neo" }, 2003); - saveActsIn(actsIn, carrie, theMatrixRevolutions, new String[] { "Trinity" }, 2003); - saveActsIn(actsIn, laurence, theMatrixRevolutions, new String[] { "Morpheus" }, 2003); - saveActsIn(actsIn, hugo, theMatrixRevolutions, new String[] { "Agent Smith" }, 
2003); - - final String theDevilsAdvocate = saveMovie(movies, "TheDevilsAdvocate", "The Devil's Advocate", 1997, - "Evil has its winning ways").getId(); - final String charlize = saveActor(actors, "Charlize", "Charlize Theron", 1975).getId(); - final String al = saveActor(actors, "Al", "Al Pacino", 1940).getId(); - saveActsIn(actsIn, keanu, theDevilsAdvocate, new String[] { "Kevin Lomax" }, 1997); - saveActsIn(actsIn, charlize, theDevilsAdvocate, new String[] { "Mary Ann Lomax" }, 1997); - saveActsIn(actsIn, al, theDevilsAdvocate, new String[] { "John Milton" }, 1997); - - final String AFewGoodMen = saveMovie(movies, "AFewGoodMen", "A Few Good Men", 1992, - "In the heart of the nation's capital, in a courthouse of the U.S. government, one man will stop at nothing to keep his honor, and one will stop at nothing to find the truth.") - .getId(); - final String tomC = saveActor(actors, "TomC", "Tom Cruise", 1962).getId(); - final String jackN = saveActor(actors, "JackN", "Jack Nicholson", 1937).getId(); - final String demiM = saveActor(actors, "DemiM", "Demi Moore", 1962).getId(); - final String kevinB = saveActor(actors, "KevinB", "Kevin Bacon", 1958).getId(); - final String kieferS = saveActor(actors, "KieferS", "Kiefer Sutherland", 1966).getId(); - final String noahW = saveActor(actors, "NoahW", "Noah Wyle", 1971).getId(); - final String cubaG = saveActor(actors, "CubaG", "Cuba Gooding Jr.", 1968).getId(); - final String kevinP = saveActor(actors, "KevinP", "Kevin Pollak", 1957).getId(); - final String jTW = saveActor(actors, "JTW", "J.T. Walsh", 1943).getId(); - final String jamesM = saveActor(actors, "JamesM", "James Marshall", 1967).getId(); - final String christopherG = saveActor(actors, "ChristopherG", "Christopher Guest", 1948).getId(); - saveActsIn(actsIn, tomC, AFewGoodMen, new String[] { "Lt. Daniel Kaffee" }, 1992); - saveActsIn(actsIn, jackN, AFewGoodMen, new String[] { "Col. Nathan R. 
Jessup" }, 1992); - saveActsIn(actsIn, demiM, AFewGoodMen, new String[] { "Lt. Cdr. JoAnne Galloway" }, 1992); - saveActsIn(actsIn, kevinB, AFewGoodMen, new String[] { "Capt. Jack Ross" }, 1992); - saveActsIn(actsIn, kieferS, AFewGoodMen, new String[] { "Lt. Jonathan Kendrick" }, 1992); - saveActsIn(actsIn, noahW, AFewGoodMen, new String[] { "Cpl. Jeffrey Barnes" }, 1992); - saveActsIn(actsIn, cubaG, AFewGoodMen, new String[] { "Cpl. Carl Hammaker" }, 1992); - saveActsIn(actsIn, kevinP, AFewGoodMen, new String[] { "Lt. Sam Weinberg" }, 1992); - saveActsIn(actsIn, jTW, AFewGoodMen, new String[] { "Lt. Col. Matthew Andrew Markinson" }, 1992); - saveActsIn(actsIn, jamesM, AFewGoodMen, new String[] { "Pfc. Louden Downey" }, 1992); - saveActsIn(actsIn, christopherG, AFewGoodMen, new String[] { "Dr. Stone" }, 1992); - - final String topGun = saveMovie(movies, "TopGun", "Top Gun", 1986, "I feel the need, the need for speed.") - .getId(); - final String kellyM = saveActor(actors, "KellyM", "Kelly McGillis", 1957).getId(); - final String valK = saveActor(actors, "ValK", "Val Kilmer", 1959).getId(); - final String anthonyE = saveActor(actors, "AnthonyE", "Anthony Edwards", 1962).getId(); - final String tomS = saveActor(actors, "TomS", "Tom Skerritt", 1933).getId(); - final String megR = saveActor(actors, "MegR", "Meg Ryan", 1961).getId(); - saveActsIn(actsIn, tomC, topGun, new String[] { "Maverick" }, 1986); - saveActsIn(actsIn, kellyM, topGun, new String[] { "Charlie" }, 1986); - saveActsIn(actsIn, valK, topGun, new String[] { "Iceman" }, 1986); - saveActsIn(actsIn, anthonyE, topGun, new String[] { "Goose" }, 1986); - saveActsIn(actsIn, tomS, topGun, new String[] { "Viper" }, 1986); - saveActsIn(actsIn, megR, topGun, new String[] { "Carole" }, 1986); - - final String jerryMaguire = saveMovie(movies, "JerryMaguire", "Jerry Maguire", 2000, - "The rest of his life begins now.").getId(); - final String reneeZ = saveActor(actors, "ReneeZ", "Renee Zellweger", 1969).getId(); - 
final String kellyP = saveActor(actors, "KellyP", "Kelly Preston", 1962).getId(); - final String jerryO = saveActor(actors, "JerryO", "Jerry O'Connell", 1974).getId(); - final String jayM = saveActor(actors, "JayM", "Jay Mohr", 1970).getId(); - final String bonnieH = saveActor(actors, "BonnieH", "Bonnie Hunt", 1961).getId(); - final String reginaK = saveActor(actors, "ReginaK", "Regina King", 1971).getId(); - final String jonathanL = saveActor(actors, "JonathanL", "Jonathan Lipnicki", 1996).getId(); - saveActsIn(actsIn, tomC, jerryMaguire, new String[] { "Jerry Maguire" }, 2000); - saveActsIn(actsIn, cubaG, jerryMaguire, new String[] { "Rod Tidwell" }, 2000); - saveActsIn(actsIn, reneeZ, jerryMaguire, new String[] { "Dorothy Boyd" }, 2000); - saveActsIn(actsIn, kellyP, jerryMaguire, new String[] { "Avery Bishop" }, 2000); - saveActsIn(actsIn, jerryO, jerryMaguire, new String[] { "Frank Cushman" }, 2000); - saveActsIn(actsIn, jayM, jerryMaguire, new String[] { "Bob Sugar" }, 2000); - saveActsIn(actsIn, bonnieH, jerryMaguire, new String[] { "Laurel Boyd" }, 2000); - saveActsIn(actsIn, reginaK, jerryMaguire, new String[] { "Marcee Tidwell" }, 2000); - saveActsIn(actsIn, jonathanL, jerryMaguire, new String[] { "Ray Boyd" }, 2000); - - final String standByMe = saveMovie(movies, "StandByMe", "Stand By Me", 1986, - "For some, it's the last real taste of innocence, and the first real taste of life. 
But for everyone, it's the time that memories are made of.") - .getId(); - final String riverP = saveActor(actors, "RiverP", "River Phoenix", 1970).getId(); - final String coreyF = saveActor(actors, "CoreyF", "Corey Feldman", 1971).getId(); - final String wilW = saveActor(actors, "WilW", "Wil Wheaton", 1972).getId(); - final String johnC = saveActor(actors, "JohnC", "John Cusack", 1966).getId(); - final String marshallB = saveActor(actors, "MarshallB", "Marshall Bell", 1942).getId(); - saveActsIn(actsIn, wilW, standByMe, new String[] { "Gordie Lachance" }, 1986); - saveActsIn(actsIn, riverP, standByMe, new String[] { "Chris Chambers" }, 1986); - saveActsIn(actsIn, jerryO, standByMe, new String[] { "Vern Tessio" }, 1986); - saveActsIn(actsIn, coreyF, standByMe, new String[] { "Teddy Duchamp" }, 1986); - saveActsIn(actsIn, johnC, standByMe, new String[] { "Denny Lachance" }, 1986); - saveActsIn(actsIn, kieferS, standByMe, new String[] { "Ace Merrill" }, 1986); - saveActsIn(actsIn, marshallB, standByMe, new String[] { "Mr. Lachance" }, 1986); - - final String asGoodAsItGets = saveMovie(movies, "AsGoodAsItGets", "As Good as It Gets", 1997, - "A comedy from the heart that goes for the throat.").getId(); - final String helenH = saveActor(actors, "HelenH", "Helen Hunt", 1963).getId(); - final String gregK = saveActor(actors, "GregK", "Greg Kinnear", 1963).getId(); - saveActsIn(actsIn, jackN, asGoodAsItGets, new String[] { "Melvin Udall" }, 1997); - saveActsIn(actsIn, helenH, asGoodAsItGets, new String[] { "Carol Connelly" }, 1997); - saveActsIn(actsIn, gregK, asGoodAsItGets, new String[] { "Simon Bishop" }, 1997); - saveActsIn(actsIn, cubaG, asGoodAsItGets, new String[] { "Frank Sachs" }, 1997); - - final String whatDreamsMayCome = saveMovie(movies, "WhatDreamsMayCome", "What Dreams May Come", 1998, - "After life there is more. 
The end is just the beginning.").getId(); - final String annabellaS = saveActor(actors, "AnnabellaS", "Annabella Sciorra", 1960).getId(); - final String maxS = saveActor(actors, "MaxS", "Max von Sydow", 1929).getId(); - final String wernerH = saveActor(actors, "WernerH", "Werner Herzog", 1942).getId(); - final String robin = saveActor(actors, "Robin", "Robin Williams", 1951).getId(); - saveActsIn(actsIn, robin, whatDreamsMayCome, new String[] { "Chris Nielsen" }, 1998); - saveActsIn(actsIn, cubaG, whatDreamsMayCome, new String[] { "Albert Lewis" }, 1998); - saveActsIn(actsIn, annabellaS, whatDreamsMayCome, new String[] { "Annie Collins-Nielsen" }, 1998); - saveActsIn(actsIn, maxS, whatDreamsMayCome, new String[] { "The Tracker" }, 1998); - saveActsIn(actsIn, wernerH, whatDreamsMayCome, new String[] { "The Face" }, 1998); - - final String snowFallingonCedars = saveMovie(movies, "SnowFallingonCedars", "Snow Falling on Cedars", 1999, - "First loves last. Forever.").getId(); - final String ethanH = saveActor(actors, "EthanH", "Ethan Hawke", 1970).getId(); - final String rickY = saveActor(actors, "RickY", "Rick Yune", 1971).getId(); - final String jamesC = saveActor(actors, "JamesC", "James Cromwell", 1940).getId(); - saveActsIn(actsIn, ethanH, snowFallingonCedars, new String[] { "Ishmael Chambers" }, 1999); - saveActsIn(actsIn, rickY, snowFallingonCedars, new String[] { "Kazuo Miyamoto" }, 1999); - saveActsIn(actsIn, maxS, snowFallingonCedars, new String[] { "Nels Gudmundsson" }, 1999); - saveActsIn(actsIn, jamesC, snowFallingonCedars, new String[] { "Judge Fielding" }, 1999); - - final String youveGotMail = saveMovie(movies, "YouveGotMail", "You've Got Mail", 1998, - "At odds in life... 
in love on-line.").getId(); - final String parkerP = saveActor(actors, "ParkerP", "Parker Posey", 1968).getId(); - final String daveC = saveActor(actors, "DaveC", "Dave Chappelle", 1973).getId(); - final String steveZ = saveActor(actors, "SteveZ", "Steve Zahn", 1967).getId(); - final String tomH = saveActor(actors, "TomH", "Tom Hanks", 1956).getId(); - saveActsIn(actsIn, tomH, youveGotMail, new String[] { "Joe Fox" }, 1998); - saveActsIn(actsIn, megR, youveGotMail, new String[] { "Kathleen Kelly" }, 1998); - saveActsIn(actsIn, gregK, youveGotMail, new String[] { "Frank Navasky" }, 1998); - saveActsIn(actsIn, parkerP, youveGotMail, new String[] { "Patricia Eden" }, 1998); - saveActsIn(actsIn, daveC, youveGotMail, new String[] { "Kevin Jackson" }, 1998); - saveActsIn(actsIn, steveZ, youveGotMail, new String[] { "George Pappas" }, 1998); - - final String sleeplessInSeattle = saveMovie(movies, "SleeplessInSeattle", "Sleepless in Seattle", 1993, - "What if someone you never met, someone you never saw, someone you never knew was the only someone for you?") - .getId(); - final String ritaW = saveActor(actors, "RitaW", "Rita Wilson", 1956).getId(); - final String billPull = saveActor(actors, "BillPull", "Bill Pullman", 1953).getId(); - final String victorG = saveActor(actors, "VictorG", "Victor Garber", 1949).getId(); - final String rosieO = saveActor(actors, "RosieO", "Rosie O'Donnell", 1962).getId(); - saveActsIn(actsIn, tomH, sleeplessInSeattle, new String[] { "Sam Baldwin" }, 1993); - saveActsIn(actsIn, megR, sleeplessInSeattle, new String[] { "Annie Reed" }, 1993); - saveActsIn(actsIn, ritaW, sleeplessInSeattle, new String[] { "Suzy" }, 1993); - saveActsIn(actsIn, billPull, sleeplessInSeattle, new String[] { "Walter" }, 1993); - saveActsIn(actsIn, victorG, sleeplessInSeattle, new String[] { "Greg" }, 1993); - saveActsIn(actsIn, rosieO, sleeplessInSeattle, new String[] { "Becky" }, 1993); - - final String joeVersustheVolcano = saveMovie(movies, "JoeVersustheVolcano", 
"Joe Versus the Volcano", 1990, - "A story of love, lava and burning desire.").getId(); - final String nathan = saveActor(actors, "Nathan", "Nathan Lane", 1956).getId(); - saveActsIn(actsIn, tomH, joeVersustheVolcano, new String[] { "Joe Banks" }, 1990); - saveActsIn(actsIn, megR, joeVersustheVolcano, - new String[] { "DeDe', 'Angelica Graynamore', 'Patricia Graynamore" }, 1990); - saveActsIn(actsIn, nathan, joeVersustheVolcano, new String[] { "Baw" }, 1990); - - final String whenHarryMetSally = saveMovie(movies, "WhenHarryMetSally", "When Harry Met Sally", 1998, - "At odds in life... in love on-line.").getId(); - final String billyC = saveActor(actors, "BillyC", "Billy Crystal", 1948).getId(); - final String carrieF = saveActor(actors, "CarrieF", "Carrie Fisher", 1956).getId(); - final String brunoK = saveActor(actors, "BrunoK", "Bruno Kirby", 1949).getId(); - saveActsIn(actsIn, billyC, whenHarryMetSally, new String[] { "Harry Burns" }, 1998); - saveActsIn(actsIn, megR, whenHarryMetSally, new String[] { "Sally Albright" }, 1998); - saveActsIn(actsIn, carrieF, whenHarryMetSally, new String[] { "Marie" }, 1998); - saveActsIn(actsIn, brunoK, whenHarryMetSally, new String[] { "Jess" }, 1998); - } - -} diff --git a/src/test/java/com/arangodb/example/graph/BaseGraphTest.java b/src/test/java/com/arangodb/example/graph/BaseGraphTest.java deleted file mode 100644 index 69c2912fc..000000000 --- a/src/test/java/com/arangodb/example/graph/BaseGraphTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import java.util.ArrayList; -import java.util.Collection; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import com.arangodb.ArangoDB; -import com.arangodb.ArangoDBException; -import com.arangodb.ArangoDatabase; -import com.arangodb.entity.EdgeDefinition; -import com.arangodb.entity.EdgeEntity; -import com.arangodb.entity.VertexEntity; - -/** - * @author Mark Vollmary - * - */ -public abstract class BaseGraphTest { - - protected static final String TEST_DB = "java_driver_graph_test_db"; - protected static ArangoDB arangoDB; - protected static ArangoDatabase db; - protected static final String GRAPH_NAME = "traversalGraph"; - protected static final String EDGE_COLLECTION_NAME = "edges"; - protected static final String VERTEXT_COLLECTION_NAME = "circles"; - - @BeforeClass - public static void init() { - if (arangoDB == null) { - arangoDB = new ArangoDB.Builder().build(); - } - try { - arangoDB.db(TEST_DB).drop(); - } catch (final ArangoDBException e) { - } - arangoDB.createDatabase(TEST_DB); - BaseGraphTest.db = arangoDB.db(TEST_DB); - - final Collection edgeDefinitions = new ArrayList(); - final EdgeDefinition edgeDefinition = new EdgeDefinition().collection(EDGE_COLLECTION_NAME) - .from(VERTEXT_COLLECTION_NAME).to(VERTEXT_COLLECTION_NAME); - edgeDefinitions.add(edgeDefinition); - try { - db.createGraph(GRAPH_NAME, edgeDefinitions, null); - addExampleElements(); - } catch (final ArangoDBException ex) { - - } - } - - 
@AfterClass - public static void shutdown() { - arangoDB.db(TEST_DB).drop(); - arangoDB.shutdown(); - arangoDB = null; - } - - private static void addExampleElements() throws ArangoDBException { - - // Add circle circles - final VertexEntity vA = createVertex(new Circle("A", "1")); - final VertexEntity vB = createVertex(new Circle("B", "2")); - final VertexEntity vC = createVertex(new Circle("C", "3")); - final VertexEntity vD = createVertex(new Circle("D", "4")); - final VertexEntity vE = createVertex(new Circle("E", "5")); - final VertexEntity vF = createVertex(new Circle("F", "6")); - final VertexEntity vG = createVertex(new Circle("G", "7")); - final VertexEntity vH = createVertex(new Circle("H", "8")); - final VertexEntity vI = createVertex(new Circle("I", "9")); - final VertexEntity vJ = createVertex(new Circle("J", "10")); - final VertexEntity vK = createVertex(new Circle("K", "11")); - - // Add relevant edges - left branch: - saveEdge(new CircleEdge(vA.getId(), vB.getId(), false, true, "left_bar")); - saveEdge(new CircleEdge(vB.getId(), vC.getId(), false, true, "left_blarg")); - saveEdge(new CircleEdge(vC.getId(), vD.getId(), false, true, "left_blorg")); - saveEdge(new CircleEdge(vB.getId(), vE.getId(), false, true, "left_blub")); - saveEdge(new CircleEdge(vE.getId(), vF.getId(), false, true, "left_schubi")); - - // Add relevant edges - right branch: - saveEdge(new CircleEdge(vA.getId(), vG.getId(), false, true, "right_foo")); - saveEdge(new CircleEdge(vG.getId(), vH.getId(), false, true, "right_blob")); - saveEdge(new CircleEdge(vH.getId(), vI.getId(), false, true, "right_blub")); - saveEdge(new CircleEdge(vG.getId(), vJ.getId(), false, true, "right_zip")); - saveEdge(new CircleEdge(vJ.getId(), vK.getId(), false, true, "right_zup")); - } - - private static EdgeEntity saveEdge(final CircleEdge edge) throws ArangoDBException { - return db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(edge); - } - - private static VertexEntity 
createVertex(final Circle vertex) throws ArangoDBException { - return db.graph(GRAPH_NAME).vertexCollection(VERTEXT_COLLECTION_NAME).insertVertex(vertex); - } - -} diff --git a/src/test/java/com/arangodb/example/graph/Circle.java b/src/test/java/com/arangodb/example/graph/Circle.java deleted file mode 100644 index 3c2e48ff7..000000000 --- a/src/test/java/com/arangodb/example/graph/Circle.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.DocumentField.Type; - -/** - * @author a-brandt - * - */ -public class Circle { - - @DocumentField(Type.ID) - private String id; - - @DocumentField(Type.KEY) - private String key; - - @DocumentField(Type.REV) - private String revision; - - private String label; - - public Circle(String key, String label) { - this.key = key; - this.label = label; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getRevision() { - return revision; - } - - public void setRevision(String revision) { - this.revision = revision; - } - - public String getLabel() { - return label; - } - - public void setLabel(String label) { - this.label = label; - } - -} diff --git a/src/test/java/com/arangodb/example/graph/CircleEdge.java b/src/test/java/com/arangodb/example/graph/CircleEdge.java deleted file mode 100644 index dfcc0dc54..000000000 --- a/src/test/java/com/arangodb/example/graph/CircleEdge.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.DocumentField.Type; - -/** - * @author a-brandt - * - */ -public class CircleEdge { - - @DocumentField(Type.ID) - private String id; - - @DocumentField(Type.KEY) - private String key; - - @DocumentField(Type.REV) - private String revision; - - @DocumentField(Type.FROM) - private String from; - - @DocumentField(Type.TO) - private String to; - - private Boolean theFalse; - private Boolean theTruth; - private String label; - - public CircleEdge(final String from, final String to, final Boolean theFalse, final Boolean theTruth, - final String label) { - this.from = from; - this.to = to; - this.theFalse = theFalse; - this.theTruth = theTruth; - this.label = label; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getRevision() { - return revision; - } - - public void setRevision(String revision) { - this.revision = revision; - } - - public String getFrom() { - return from; - } - - public void setFrom(String from) { - this.from = from; - } - - public String getTo() { - return to; - } - - public void setTo(String to) { - this.to = to; - } - - public Boolean getTheFalse() { - return theFalse; - } - - public void setTheFalse(Boolean theFalse) { - this.theFalse = theFalse; - } - - public Boolean getTheTruth() { - return theTruth; - } - - public void setTheTruth(Boolean theTruth) { - this.theTruth = theTruth; - } - - public String getLabel() { - return label; - } - - public void setLabel(String label) { - this.label = label; - } - -} diff --git a/src/test/java/com/arangodb/example/graph/GraphTraversalsInAQLExample.java b/src/test/java/com/arangodb/example/graph/GraphTraversalsInAQLExample.java deleted file mode 100644 index 
9bfd3a573..000000000 --- a/src/test/java/com/arangodb/example/graph/GraphTraversalsInAQLExample.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import java.util.Collection; - -import org.junit.Test; - -import com.arangodb.ArangoCursor; -import com.arangodb.ArangoDBException; - -/** - * Graph traversals in AQL - * - * @see Graph traversals in AQL - * - * @author a-brandt - * - */ -public class GraphTraversalsInAQLExample extends BaseGraphTest { - - @Test - public void queryAllVertices() throws ArangoDBException { - String queryString = "FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' RETURN v._key"; - ArangoCursor cursor = db.query(queryString, null, null, String.class); - Collection result = cursor.asListRemaining(); - assertThat(result.size(), is(10)); - - queryString = "WITH circles FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key"; - cursor = db.query(queryString, null, null, String.class); - result = cursor.asListRemaining(); - assertThat(result.size(), is(10)); - } - - @Test - public void queryDepthTwo() throws ArangoDBException { - String queryString = "FOR v IN 2..2 OUTBOUND 'circles/A' 
GRAPH 'traversalGraph' return v._key"; - ArangoCursor cursor = db.query(queryString, null, null, String.class); - Collection result = cursor.asListRemaining(); - assertThat(result.size(), is(4)); - assertThat(result, hasItems("C", "E", "H", "J")); - - queryString = "FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' return v._key"; - cursor = db.query(queryString, null, null, String.class); - result = cursor.asListRemaining(); - assertThat(result.size(), is(4)); - assertThat(result, hasItems("C", "E", "H", "J")); - } - - @Test - public void queryWithFilter() throws ArangoDBException { - String queryString = "FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' RETURN v._key"; - ArangoCursor cursor = db.query(queryString, null, null, String.class); - Collection result = cursor.asListRemaining(); - assertThat(result.size(), is(5)); - assertThat(result, hasItems("B", "C", "D", "E", "F")); - - queryString = "FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.edges[0].label != 'right_foo' RETURN v._key"; - cursor = db.query(queryString, null, null, String.class); - result = cursor.asListRemaining(); - assertThat(result.size(), is(5)); - assertThat(result, hasItems("B", "C", "D", "E", "F")); - - queryString = "FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' FILTER p.edges[1].label != 'left_blub' return v._key"; - cursor = db.query(queryString, null, null, String.class); - - result = cursor.asListRemaining(); - assertThat(result.size(), is(3)); - assertThat(result, hasItems("B", "C", "D")); - - queryString = "FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G' AND p.edges[1].label != 'left_blub' return v._key"; - cursor = db.query(queryString, null, null, String.class); - result = cursor.asListRemaining(); - assertThat(result.size(), is(3)); - assertThat(result, hasItems("B", "C", "D")); - } - - @Test - public void 
queryOutboundInbound() throws ArangoDBException { - String queryString = "FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"; - ArangoCursor cursor = db.query(queryString, null, null, String.class); - Collection result = cursor.asListRemaining(); - assertThat(result.size(), is(1)); - assertThat(result, hasItems("F")); - - queryString = "FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"; - cursor = db.query(queryString, null, null, String.class); - result = cursor.asListRemaining(); - assertThat(result.size(), is(2)); - assertThat(result, hasItems("B", "A")); - - queryString = "FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph' return v._key"; - cursor = db.query(queryString, null, null, String.class); - - result = cursor.asListRemaining(); - assertThat(result.size(), is(6)); - assertThat(result, hasItems("F", "B", "C", "D", "A", "G")); - } - -} diff --git a/src/test/java/com/arangodb/example/graph/ShortestPathInAQLExample.java b/src/test/java/com/arangodb/example/graph/ShortestPathInAQLExample.java deleted file mode 100644 index 131e14f32..000000000 --- a/src/test/java/com/arangodb/example/graph/ShortestPathInAQLExample.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.graph; - -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import org.junit.Test; - -import com.arangodb.ArangoCursor; -import com.arangodb.ArangoDBException; - -/** - * Shortest Path in AQL - * - * @see Shortest Path in AQL - * - * @author a-brandt - * - */ -public class ShortestPathInAQLExample extends BaseGraphTest { - - public static class Pair { - - private String vertex; - private String edge; - - public String getVertex() { - return vertex; - } - - public void setVertex(final String vertex) { - this.vertex = vertex; - } - - public String getEdge() { - return edge; - } - - public void setEdge(final String edge) { - this.edge = edge; - } - - } - - @Test - public void queryShortestPathFromAToD() throws ArangoDBException { - String queryString = "FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' GRAPH 'traversalGraph' RETURN {'vertex': v._key, 'edge': e._key}"; - ArangoCursor cursor = db.query(queryString, null, null, Pair.class); - final Collection collection = toVertexCollection(cursor); - assertThat(collection.size(), is(4)); - assertThat(collection, hasItems("A", "B", "C", "D")); - - queryString = "WITH circles FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' edges RETURN {'vertex': v._key, 'edge': e._key}"; - cursor = db.query(queryString, null, null, Pair.class); - assertThat(collection.size(), is(4)); - assertThat(collection, hasItems("A", "B", "C", "D")); - } - - @Test - public void queryShortestPathByFilter() throws ArangoDBException { - String queryString = "FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d GRAPH 'traversalGraph' RETURN {'vertex':v._key, 'edge':e._key}"; - ArangoCursor cursor = 
db.query(queryString, null, null, Pair.class); - final Collection collection = toVertexCollection(cursor); - assertThat(collection.size(), is(4)); - assertThat(collection, hasItems("A", "B", "C", "D")); - - queryString = "FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN OUTBOUND SHORTEST_PATH a TO d edges RETURN {'vertex': v._key, 'edge': e._key}"; - cursor = db.query(queryString, null, null, Pair.class); - assertThat(collection.size(), is(4)); - assertThat(collection, hasItems("A", "B", "C", "D")); - } - - protected Collection toVertexCollection(final ArangoCursor cursor) { - final List result = new ArrayList(); - for (; cursor.hasNext();) { - final Pair pair = cursor.next(); - result.add(pair.getVertex()); - } - return result; - } - -} diff --git a/src/test/java/com/arangodb/example/ssl/SslExample.java b/src/test/java/com/arangodb/example/ssl/SslExample.java deleted file mode 100644 index b9abf1b04..000000000 --- a/src/test/java/com/arangodb/example/ssl/SslExample.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.ssl; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.security.KeyStore; - -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; - -import org.junit.Ignore; -import org.junit.Test; - -import com.arangodb.ArangoDB; -import com.arangodb.entity.ArangoDBVersion; - -/** - * @author Mark Vollmary - * - */ -public class SslExample { - - /*- - * a SSL trust store - * - * create the trust store for the self signed certificate: - * keytool -import -alias "my arangodb server cert" -file UnitTests/server.pem -keystore example.truststore - * - * Documentation: - * https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/conn/ssl/SSLSocketFactory.html - */ - private static final String SSL_TRUSTSTORE = "/example.truststore"; - private static final String SSL_TRUSTSTORE_PASSWORD = "12345678"; - - @Test - @Ignore - public void connect() throws Exception { - final KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(this.getClass().getResourceAsStream(SSL_TRUSTSTORE), SSL_TRUSTSTORE_PASSWORD.toCharArray()); - - final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, SSL_TRUSTSTORE_PASSWORD.toCharArray()); - - final TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ks); - - final SSLContext sc = SSLContext.getInstance("TLS"); - sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - - final ArangoDB arangoDB = new ArangoDB.Builder() - .loadProperties(SslExample.class.getResourceAsStream("/arangodb-ssl.properties")).useSsl(true) - .sslContext(sc).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, 
is(notNullValue())); - } - -} diff --git a/src/test/java/com/arangodb/example/velocypack/VPackExample.java b/src/test/java/com/arangodb/example/velocypack/VPackExample.java deleted file mode 100644 index b01d711b3..000000000 --- a/src/test/java/com/arangodb/example/velocypack/VPackExample.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.velocypack; - -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import java.util.Iterator; -import java.util.Map.Entry; - -import org.junit.Test; - -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocypack.exception.VPackException; - -/** - * @author Mark Vollmary - * - */ -public class VPackExample { - - @Test - public void buildObject() throws VPackException { - final VPackBuilder builder = new VPackBuilder(); - builder.add(ValueType.OBJECT);// object start - builder.add("foo", 1); // add field "foo" with value 1 - builder.add("bar", 2); // add field "bar" with value 2 - builder.close();// object end - - final VPackSlice slice = builder.slice(); // create slice - assertThat(slice.isObject(), is(true)); - assertThat(slice.size(), is(2)); // number of fields - - final VPackSlice 
foo = slice.get("foo"); // get field "foo" - assertThat(foo.isInteger(), is(true)); - assertThat(foo.getAsInt(), is(1)); - - final VPackSlice bar = slice.get("bar"); // get field "bar" - assertThat(bar.isInteger(), is(true)); - assertThat(bar.getAsInt(), is(2)); - - // iterate over the fields - for (final Iterator> iterator = slice.objectIterator(); iterator.hasNext();) { - final Entry field = iterator.next(); - assertThat(field.getValue().isInteger(), is(true)); - } - } - - @Test - public void buildArray() throws VPackException { - final VPackBuilder builder = new VPackBuilder(); - builder.add(ValueType.ARRAY); // array start - builder.add(1);// add value 1 - builder.add(2);// add value 2 - builder.add(3);// add value 3 - builder.close(); // array end - - final VPackSlice slice = builder.slice();// create slice - assertThat(slice.isArray(), is(true)); - assertThat(slice.size(), is(3));// number of values - - // iterate over values - for (int i = 0; i < slice.size(); i++) { - final VPackSlice value = slice.get(i); - assertThat(value.isInteger(), is(true)); - assertThat(value.getAsInt(), is(i + 1)); - } - - // iterate over values with Iterator - for (final Iterator iterator = slice.arrayIterator(); iterator.hasNext();) { - final VPackSlice value = iterator.next(); - assertThat(value.isInteger(), is(true)); - } - } - - @Test - public void buildObjectInObject() throws VPackException { - final VPackBuilder builder = new VPackBuilder(); - builder.add(ValueType.OBJECT);// object start - builder.add("foo", ValueType.OBJECT); // add object in field "foo" - builder.add("bar", 2); // add field "bar" with value 2 to object "foo" - builder.close();// object "foo" end - builder.close();// object end - - final VPackSlice slice = builder.slice(); // create slice - assertThat(slice.isObject(), is(true)); - - final VPackSlice foo = slice.get("foo"); - assertThat(foo.isObject(), is(true)); - - final VPackSlice bar = foo.get("bar"); // get field "bar" from "foo" - 
assertThat(bar.isInteger(), is(true)); - } - -} diff --git a/src/test/java/com/arangodb/internal/DocumentCacheTest.java b/src/test/java/com/arangodb/internal/DocumentCacheTest.java deleted file mode 100644 index 71335e30f..000000000 --- a/src/test/java/com/arangodb/internal/DocumentCacheTest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; - -import java.util.HashMap; -import java.util.Map; - -import org.junit.Test; - -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.DocumentField; -import com.arangodb.entity.DocumentField.Type; - -/** - * @author Mark Vollmary - * - */ -public class DocumentCacheTest { - - @Test - public void setValues() { - final DocumentCache cache = new DocumentCache(); - final BaseDocument doc = new BaseDocument(); - - assertThat(doc.getId(), is(nullValue())); - assertThat(doc.getKey(), is(nullValue())); - assertThat(doc.getRevision(), is(nullValue())); - - final Map values = new HashMap(); - values.put(Type.ID, "testId"); - values.put(Type.KEY, "testKey"); - values.put(Type.REV, "testRev"); - cache.setValues(doc, values); - - assertThat(doc.getId(), is("testId")); - 
assertThat(doc.getKey(), is("testKey")); - assertThat(doc.getRevision(), is("testRev")); - } - - @Test - public void setValuesMap() { - final DocumentCache cache = new DocumentCache(); - final Map map = new HashMap(); - - final Map values = new HashMap(); - values.put(Type.ID, "testId"); - values.put(Type.KEY, "testKey"); - values.put(Type.REV, "testRev"); - cache.setValues(map, values); - - assertThat(map.isEmpty(), is(true)); - } -} diff --git a/src/test/java/com/arangodb/internal/HostHandlerTest.java b/src/test/java/com/arangodb/internal/HostHandlerTest.java deleted file mode 100644 index 9c962629f..000000000 --- a/src/test/java/com/arangodb/internal/HostHandlerTest.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal; - -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import org.junit.Test; - -import com.arangodb.internal.net.FallbackHostHandler; -import com.arangodb.internal.net.HostHandler; -import com.arangodb.internal.net.HostResolver; -import com.arangodb.internal.net.RandomHostHandler; -import com.arangodb.internal.net.RoundRobinHostHandler; - -/** - * @author Mark Vollmary - * - */ -public class HostHandlerTest { - - private static final Host HOST_0 = new Host("127.0.0.1", 8529); - private static final Host HOST_1 = new Host("127.0.0.2", 8529); - private static final Host HOST_2 = new Host("127.0.0.3", 8529); - - private static final HostResolver SINGLE_HOST = new HostResolver() { - @Override - public List resolve(final boolean initial, final boolean closeConnections) { - return Collections. 
singletonList(HOST_0); - } - - @Override - public void init(final EndpointResolver resolver) { - } - }; - private static final HostResolver MULTIPLE_HOSTS = new HostResolver() { - @Override - public List resolve(final boolean initial, final boolean closeConnections) { - final ArrayList hosts = new ArrayList(); - hosts.add(HOST_0); - hosts.add(HOST_1); - hosts.add(HOST_2); - return hosts; - } - - @Override - public void init(final EndpointResolver resolver) { - } - }; - - @Test - public void fallbachHostHandlerSingleHost() { - final HostHandler handler = new FallbackHostHandler(SINGLE_HOST); - assertThat(handler.get(), is(HOST_0)); - handler.fail(); - assertThat(handler.get(), is(HOST_0)); - } - - @Test - public void fallbackHostHandlerMultipleHosts() { - final HostHandler handler = new FallbackHostHandler(MULTIPLE_HOSTS); - for (int i = 0; i < 3; i++) { - assertThat(handler.get(), is(HOST_0)); - handler.fail(); - assertThat(handler.get(), is(HOST_1)); - handler.fail(); - assertThat(handler.get(), is(HOST_2)); - if (i < 2) { - handler.fail(); - assertThat(handler.get(), is(HOST_0)); - } else { - handler.fail(); - assertThat(handler.get(), is(nullValue())); - } - } - } - - @Test - public void randomHostHandlerSingleHost() { - final HostHandler handler = new RandomHostHandler(SINGLE_HOST, new FallbackHostHandler(SINGLE_HOST)); - assertThat(handler.get(), is(HOST_0)); - handler.fail(); - assertThat(handler.get(), is(HOST_0)); - } - - @Test - public void randomHostHandlerMultipeHosts() { - final HostHandler handler = new RandomHostHandler(MULTIPLE_HOSTS, new FallbackHostHandler(MULTIPLE_HOSTS)); - final Host pick0 = handler.get(); - assertThat(pick0, anyOf(is(HOST_0), is(HOST_1), is(HOST_2))); - handler.fail(); - assertThat(handler.get(), anyOf(is(HOST_0), is(HOST_1), is(HOST_2))); - handler.success(); - assertThat(handler.get(), is(pick0)); - } - - @Test - public void roundRobinHostHandlerSingleHost() { - final HostHandler handler = new 
RoundRobinHostHandler(SINGLE_HOST); - assertThat(handler.get(), is(HOST_0)); - handler.fail(); - assertThat(handler.get(), is(HOST_0)); - } - - @Test - public void roundRobinHostHandlerMultipleHosts() { - final HostHandler handler = new RoundRobinHostHandler(MULTIPLE_HOSTS); - final Host pick0 = handler.get(); - assertThat(pick0, anyOf(is(HOST_0), is(HOST_1), is(HOST_2))); - final Host pick1 = handler.get(); - assertThat(pick1, anyOf(is(HOST_0), is(HOST_1), is(HOST_2))); - assertThat(pick1, is(not(pick0))); - final Host pick2 = handler.get(); - assertThat(pick2, anyOf(is(HOST_0), is(HOST_1), is(HOST_2))); - assertThat(pick2, not(anyOf(is(pick0), is(pick1)))); - final Host pick4 = handler.get(); - assertThat(pick4, is(pick0)); - } - -} diff --git a/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java b/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java deleted file mode 100644 index 50a263923..000000000 --- a/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.internal.velocystream; - -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import java.util.Collection; -import java.util.Iterator; -import java.util.concurrent.ConcurrentLinkedQueue; - -import org.junit.Test; - -import com.arangodb.ArangoDB; -import com.arangodb.ArangoDBException; -import com.arangodb.ArangoDatabase; -import com.arangodb.entity.ArangoDBVersion; - -/** - * @author Mark Vollmary - * - */ -public class CommunicationTest { - - private static final String FAST = "fast"; - private static final String SLOW = "slow"; - - @Test - public void chunkSizeSmall() { - final ArangoDB arangoDB = new ArangoDB.Builder().chunksize(20).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - } - - @Test - public void multiThread() throws Exception { - final ArangoDB arangoDB = new ArangoDB.Builder().build(); - arangoDB.getVersion();// authentication - - final Collection result = new ConcurrentLinkedQueue(); - final Thread fast = new Thread() { - @Override - public void run() { - try { - arangoDB.db().query("return sleep(1)", null, null, null); - result.add(FAST); - } catch (final ArangoDBException e) { - } - } - }; - final Thread slow = new Thread() { - @Override - public void run() { - try { - arangoDB.db().query("return sleep(4)", null, null, null); - result.add(SLOW); - } catch (final ArangoDBException e) { - } - } - }; - slow.start(); - Thread.sleep(1000); - fast.start(); - - slow.join(); - fast.join(); - - assertThat(result.size(), is(2)); - final Iterator iterator = result.iterator(); - assertThat(iterator.next(), is(FAST)); - assertThat(iterator.next(), is(SLOW)); - } - - @Test - public void multiThreadSameDatabases() throws Exception { - final ArangoDB arangoDB = new ArangoDB.Builder().build(); - arangoDB.getVersion();// 
authentication - - final ArangoDatabase db = arangoDB.db(); - - final Collection result = new ConcurrentLinkedQueue(); - final Thread t1 = new Thread() { - @Override - public void run() { - try { - db.query("return sleep(1)", null, null, null); - result.add("1"); - } catch (final ArangoDBException e) { - e.printStackTrace(System.err); - } - } - }; - final Thread t2 = new Thread() { - @Override - public void run() { - try { - db.query("return sleep(1)", null, null, null); - result.add("1"); - } catch (final ArangoDBException e) { - e.printStackTrace(System.err); - } - } - }; - t2.start(); - t1.start(); - t2.join(); - t1.join(); - assertThat(result.size(), is(2)); - } - - @Test - public void multiThreadMultiDatabases() throws Exception { - final ArangoDB arangoDB = new ArangoDB.Builder().build(); - arangoDB.getVersion();// authentication - - try { - arangoDB.createDatabase("db1"); - arangoDB.createDatabase("db2"); - final ArangoDatabase db1 = arangoDB.db("db1"); - final ArangoDatabase db2 = arangoDB.db("db2"); - - final Collection result = new ConcurrentLinkedQueue(); - final Thread t1 = new Thread() { - @Override - public void run() { - try { - db1.query("return sleep(1)", null, null, null); - result.add("1"); - } catch (final ArangoDBException e) { - } - } - }; - final Thread t2 = new Thread() { - @Override - public void run() { - try { - db2.query("return sleep(1)", null, null, null); - result.add("1"); - } catch (final ArangoDBException e) { - } - } - }; - t2.start(); - t1.start(); - t2.join(); - t1.join(); - assertThat(result.size(), is(2)); - } finally { - arangoDB.db("db1").drop(); - arangoDB.db("db2").drop(); - } - } - - @Test - public void minOneConnection() { - final ArangoDB arangoDB = new ArangoDB.Builder().maxConnections(0).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - } - - @Test - public void defaultMaxConnection() { - final ArangoDB arangoDB = new 
ArangoDB.Builder().maxConnections(null).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version, is(notNullValue())); - } -} diff --git a/src/test/java/com/arangodb/util/ArangoSerializationTest.java b/src/test/java/com/arangodb/util/ArangoSerializationTest.java deleted file mode 100644 index e50062475..000000000 --- a/src/test/java/com/arangodb/util/ArangoSerializationTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -import org.junit.BeforeClass; -import org.junit.Test; - -import com.arangodb.ArangoDB; -import com.arangodb.entity.BaseDocument; -import com.arangodb.internal.ArangoExecutorSync; -import com.arangodb.internal.InternalArangoDB; -import com.arangodb.internal.velocystream.internal.ConnectionSync; -import com.arangodb.velocypack.Type; -import com.arangodb.velocypack.VPackBuilder; -import com.arangodb.velocypack.VPackSlice; -import com.arangodb.velocypack.ValueType; -import com.arangodb.velocystream.Response; - -/** - * @author Mark Vollmary - * - */ -public class ArangoSerializationTest { - - private static ArangoSerialization util; - - @BeforeClass - public static void setup() { - final InternalArangoDB arangoDB = new ArangoDB.Builder().build(); - util = arangoDB.util(); - } - - @Test - public void deseriarlize() { - final VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT).add("foo", "bar").close(); - final BaseDocument doc = util.deserialize(builder.slice(), BaseDocument.class); - assertThat(doc.getAttribute("foo").toString(), is("bar")); - } - - @Test - public void serialize() { - final BaseDocument entity = new BaseDocument(); - entity.addAttribute("foo", "bar"); - final VPackSlice vpack = util.serialize(entity); - assertThat(vpack.get("foo").isString(), is(true)); - assertThat(vpack.get("foo").getAsString(), is("bar")); - } - - @Test - public void serializeNullValues() { - final BaseDocument entity = new BaseDocument(); - entity.addAttribute("foo", null); - final VPackSlice vpack = util.serialize(entity, new ArangoSerializer.Options().serializeNullValues(true)); - assertThat(vpack.get("foo").isNull(), is(true)); - } - - @Test - public void serializeType() { - 
final Collection list = new ArrayList(); - list.add(new BaseDocument()); - list.add(new BaseDocument()); - - final VPackSlice vpack = util.serialize(list, - new ArangoSerializer.Options().type(new Type>() { - }.getType())); - assertThat(vpack.isArray(), is(true)); - assertThat(vpack.getLength(), is(list.size())); - } - - @Test - public void parseJsonIncludeNull() { - final Map entity = new HashMap(); - entity.put("value", new String[] { "test", null }); - final String json = util.deserialize(util.serialize(entity, new ArangoSerializer.Options()), String.class); - assertThat(json, is("{\"value\":[\"test\",null]}")); - } -} diff --git a/src/test/java/com/arangodb/util/MapBuilderTest.java b/src/test/java/com/arangodb/util/MapBuilderTest.java deleted file mode 100644 index 246202f12..000000000 --- a/src/test/java/com/arangodb/util/MapBuilderTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertThat; - -import java.util.Map; - -import org.junit.Test; - -/** - * @author Mark Vollmary - * - */ -public class MapBuilderTest { - - @Test - public void build() { - final Map map = new MapBuilder().put("foo", "bar").get(); - assertThat(map.size(), is(1)); - assertThat(map.get("foo"), is(notNullValue())); - assertThat(map.get("foo").toString(), is("bar")); - } -} diff --git a/src/test/resources/arangodb-bad.properties b/src/test/resources/arangodb-bad.properties deleted file mode 100644 index 2b2743531..000000000 --- a/src/test/resources/arangodb-bad.properties +++ /dev/null @@ -1 +0,0 @@ -arangodb.hosts=127.0.0.1:8529,127.0.0.1:fail \ No newline at end of file diff --git a/src/test/resources/arangodb-bad2.properties b/src/test/resources/arangodb-bad2.properties deleted file mode 100644 index 1c19ad869..000000000 --- a/src/test/resources/arangodb-bad2.properties +++ /dev/null @@ -1 +0,0 @@ -arangodb.host=127.0.0.1:8529 diff --git a/src/test/resources/arangodb-ssl.properties b/src/test/resources/arangodb-ssl.properties deleted file mode 100644 index 2943d84ba..000000000 --- a/src/test/resources/arangodb-ssl.properties +++ /dev/null @@ -1,2 +0,0 @@ -arangodb.hosts=localhost:8530 -arangodb.useSsl=true diff --git a/src/test/resources/arangodb.properties b/src/test/resources/arangodb.properties deleted file mode 100644 index 5682618e6..000000000 --- a/src/test/resources/arangodb.properties +++ /dev/null @@ -1 +0,0 @@ -arangodb.hosts=127.0.0.1:8529 diff --git a/src/test/resources/example.truststore b/src/test/resources/example.truststore deleted file mode 100644 index 1c311ed83..000000000 Binary files a/src/test/resources/example.truststore and /dev/null differ diff --git a/src/test/resources/logback-test.xml b/src/test/resources/logback-test.xml 
deleted file mode 100644 index 645d34624..000000000 --- a/src/test/resources/logback-test.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - diff --git a/test-functional/pom.xml b/test-functional/pom.xml new file mode 100644 index 000000000..3266e7db0 --- /dev/null +++ b/test-functional/pom.xml @@ -0,0 +1,216 @@ + + + 4.0.0 + + + ../test-parent + com.arangodb + test-parent + 7.22.0 + + + test-functional + + + + org.eclipse.parsson + parsson + 1.1.7 + test + + + + + + shaded + + + shaded + true + + + + + + com.google.code.maven-replacer-plugin + replacer + + + **/CustomSerdeTest.**, + **/CustomSerdeAsyncTest.**, + **/JacksonInterferenceTest.**, + **/JacksonRequestContextTest.**, + **/HttpProxyTest.**, + **/RequestContextTest.** + + + + com.fasterxml.jackson.databind.JsonNode + com.arangodb.shaded.fasterxml.jackson.databind.JsonNode + + + com.fasterxml.jackson.databind.ObjectNode + com.arangodb.shaded.fasterxml.jackson.databind.ObjectNode + + + com.fasterxml.jackson.databind.node + com.arangodb.shaded.fasterxml.jackson.databind.node + + + com.fasterxml.jackson.databind.ObjectMapper + com.arangodb.shaded.fasterxml.jackson.databind.ObjectMapper + + + com.fasterxml.jackson.core.JsonProcessingException + com.arangodb.shaded.fasterxml.jackson.core.JsonProcessingException + + + + + + + + + default + + + shaded + !true + + + + + org.graalvm.sdk + graal-sdk + ${graalvm.version} + test + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + generate-test-sources + + add-test-source + + + + src/test-default/java + + + + + + + + + + ssl + + + ssl + true + + + + src/test-ssl/java + + + + no-ssl + + + ssl + !true + + + + src/test/java + + + + native + + + native + true + + + + + + org.graalvm.buildtools + native-maven-plugin + 0.10.6 + true + + + test-native + + generateTestResourceConfig + test + + verify + + + + true + false + + --no-fallback --verbose + --link-at-build-time 
-H:+ReportExceptionStackTraces + + + + + + + + no-native + + + native + !true + + + + + io.qameta.allure + allure-junit5 + 2.29.1 + test + + + + + static-code-analysis + + + + org.jacoco + jacoco-maven-plugin + + + + prepare-agent + + + + + + com/arangodb/** + + + + + + + + + diff --git a/test-functional/src/test-default/java/graal/BrotliSubstitutions.java b/test-functional/src/test-default/java/graal/BrotliSubstitutions.java new file mode 100644 index 000000000..ccd245cf4 --- /dev/null +++ b/test-functional/src/test-default/java/graal/BrotliSubstitutions.java @@ -0,0 +1,20 @@ +package graal; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + +public class BrotliSubstitutions { + + @TargetClass(className = "io.netty.handler.codec.compression.Brotli") + static final class Target_io_netty_handler_codec_compression_Brotli { + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static void ensureAvailability() throws Throwable { + throw new UnsupportedOperationException(); + } + } +} diff --git a/test-functional/src/test-default/java/graal/netty/EmptyByteBufStub.java b/test-functional/src/test-default/java/graal/netty/EmptyByteBufStub.java new file mode 100644 index 000000000..1dc6dabf7 --- /dev/null +++ b/test-functional/src/test-default/java/graal/netty/EmptyByteBufStub.java @@ -0,0 +1,33 @@ +package graal.netty; + +import io.netty.util.internal.PlatformDependent; + +import java.nio.ByteBuffer; + +public final class EmptyByteBufStub { + private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0); + private static final long EMPTY_BYTE_BUFFER_ADDRESS; + + static { + long emptyByteBufferAddress = 0; + try { + if (PlatformDependent.hasUnsafe()) { + emptyByteBufferAddress = PlatformDependent.directBufferAddress(EMPTY_BYTE_BUFFER); + } + } catch (Throwable t) { + // Ignore + } + EMPTY_BYTE_BUFFER_ADDRESS = emptyByteBufferAddress; + } + + public static 
ByteBuffer emptyByteBuffer() { + return EMPTY_BYTE_BUFFER; + } + + public static long emptyByteBufferAddress() { + return EMPTY_BYTE_BUFFER_ADDRESS; + } + + private EmptyByteBufStub() { + } +} diff --git a/test-functional/src/test-default/java/graal/netty/graal/HttpContentCompressorSubstitutions.java b/test-functional/src/test-default/java/graal/netty/graal/HttpContentCompressorSubstitutions.java new file mode 100644 index 000000000..92251b77b --- /dev/null +++ b/test-functional/src/test-default/java/graal/netty/graal/HttpContentCompressorSubstitutions.java @@ -0,0 +1,69 @@ +package graal.netty.graal; + +import java.util.function.BooleanSupplier; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; + +public class HttpContentCompressorSubstitutions { + + @TargetClass(className = "io.netty.handler.codec.compression.ZstdEncoder", onlyWith = IsZstdAbsent.class) + public static final class ZstdEncoderFactorySubstitution { + + @Substitute + protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) throws Exception { + throw new UnsupportedOperationException(); + } + + @Substitute + protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) { + throw new UnsupportedOperationException(); + } + + @Substitute + public void flush(final ChannelHandlerContext ctx) { + throw new UnsupportedOperationException(); + } + } + + @Substitute + @TargetClass(className = "io.netty.handler.codec.compression.ZstdConstants", onlyWith = IsZstdAbsent.class) + public static final class ZstdConstants { + + // The constants make calls to com.github.luben.zstd.Zstd so we cut links with that substitution. 
+ + static final int DEFAULT_COMPRESSION_LEVEL = 0; + + static final int MIN_COMPRESSION_LEVEL = 0; + + static final int MAX_COMPRESSION_LEVEL = 0; + + static final int MAX_BLOCK_SIZE = 0; + + static final int DEFAULT_BLOCK_SIZE = 0; + } + + public static class IsZstdAbsent implements BooleanSupplier { + + private boolean zstdAbsent; + + public IsZstdAbsent() { + try { + Class.forName("com.github.luben.zstd.Zstd"); + zstdAbsent = false; + } catch (Exception e) { + // It can be a classloading issue (the library is not available), or a native issue + // (the library for the current OS/arch is not available) + zstdAbsent = true; + } + } + + @Override + public boolean getAsBoolean() { + return zstdAbsent; + } + } +} diff --git a/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java b/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java new file mode 100644 index 000000000..4eab2181b --- /dev/null +++ b/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java @@ -0,0 +1,604 @@ +package graal.netty.graal; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.RecomputeFieldValue; +import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import graal.netty.EmptyByteBufStub; +import io.netty.bootstrap.AbstractBootstrapConfig; +import io.netty.bootstrap.ChannelFactory; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.*; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.ZlibCodecFactory; +import io.netty.handler.codec.compression.ZlibWrapper; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http2.Http2Exception; +import io.netty.handler.ssl.*; +import io.netty.handler.ssl.ApplicationProtocolConfig.SelectorFailureBehavior; +import 
io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; + +import javax.crypto.NoSuchPaddingException; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.*; +import java.security.cert.X509Certificate; +import java.security.spec.InvalidKeySpecException; +import java.util.*; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.function.BooleanSupplier; + +import static io.netty.handler.codec.http.HttpHeaderValues.*; + +/** + * This substitution avoid having loggers added to the build + */ +@TargetClass(className = "io.netty.util.internal.logging.InternalLoggerFactory") +final class Target_io_netty_util_internal_logging_InternalLoggerFactory { + + @Substitute + private static InternalLoggerFactory newDefaultFactory(String name) { + return JdkLoggerFactory.INSTANCE; + } +} + +// SSL +// This whole section is mostly about removing static analysis references to openssl/tcnative + +@TargetClass(className = "io.netty.handler.ssl.SslProvider") +final class Target_io_netty_handler_ssl_SslProvider { + @Substitute + public static boolean isAlpnSupported(final SslProvider provider) { + switch (provider) { + case JDK: + return Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator.isAlpnSupported(); + case OPENSSL: + case OPENSSL_REFCNT: + return false; + default: + throw new Error("SslProvider unsupported on Quarkus " + provider); + } + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator") +final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator { + @Alias + static boolean isAlpnSupported() { + return true; + } +} + +/** + * Hardcode 
io.netty.handler.ssl.OpenSsl as non-available + */ +@TargetClass(className = "io.netty.handler.ssl.OpenSsl") +final class Target_io_netty_handler_ssl_OpenSsl { + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Throwable UNAVAILABILITY_CAUSE = new RuntimeException("OpenSsl unsupported on Quarkus"); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static List DEFAULT_CIPHERS = Collections.emptyList(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static Set AVAILABLE_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Set AVAILABLE_OPENSSL_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static Set AVAILABLE_JAVA_CIPHER_SUITES = Collections.emptySet(); + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static boolean SUPPORTS_KEYMANAGER_FACTORY = false; + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + private static boolean SUPPORTS_OCSP = false; + + @Alias + @RecomputeFieldValue(kind = Kind.FromAlias) + static Set SUPPORTED_PROTOCOLS_SET = Collections.emptySet(); + + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static int version() { + return -1; + } + + @Substitute + public static String versionString() { + return null; + } + + @Substitute + public static boolean isCipherSuiteAvailable(String cipherSuite) { + return false; + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslServerContext") +final class Target_io_netty_handler_ssl_JdkSslServerContext { + + @Alias + Target_io_netty_handler_ssl_JdkSslServerContext(Provider provider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, long 
sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, boolean startTls, + String keyStore) + throws SSLException { + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslClientContext") +final class Target_io_netty_handler_ssl_JdkSslClientContext { + + @Alias + Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, X509Certificate[] trustCertCollection, + TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, + String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, + CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, + long sessionCacheSize, long sessionTimeout, String keyStoreType) + throws SSLException { + + } +} +@TargetClass(className = "io.netty.handler.ssl.SslHandler$SslEngineType") +final class Target_io_netty_handler_ssl_SslHandler$SslEngineType { + + @Alias + public static Target_io_netty_handler_ssl_SslHandler$SslEngineType JDK; + + @Substitute + static Target_io_netty_handler_ssl_SslHandler$SslEngineType forEngine(SSLEngine engine) { + return JDK; + } +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator$AlpnWrapper") +final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator_AlpnWrapper { + @Substitute + public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, + JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { + return (SSLEngine) (Object) new Target_io_netty_handler_ssl_JdkAlpnSslEngine(engine, applicationNegotiator, + isServer); + } + +} + +@TargetClass(className = "io.netty.handler.ssl.JdkAlpnSslEngine") +final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { + @Alias + Target_io_netty_handler_ssl_JdkAlpnSslEngine(final SSLEngine engine, + final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + + } +} + +@TargetClass(className = "io.netty.handler.ssl.SslContext") +final 
class Target_io_netty_handler_ssl_SslContext { + + @Substitute + static SslContext newServerContextInternal(SslProvider provider, Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, + PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, + CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, boolean startTls, boolean enableOcsp, String keyStoreType, + Map.Entry, Object>... ctxOptions) throws SSLException { + if (enableOcsp) { + throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); + } + return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslServerContext(sslContextProvider, + trustCertCollection, trustManagerFactory, keyCertChain, key, keyPassword, + keyManagerFactory, ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout, + clientAuth, protocols, startTls, keyStoreType); + } + + @Substitute + static SslContext newClientContextInternal(SslProvider provider, Provider sslContextProvider, + X509Certificate[] trustCert, + TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long sessionTimeout, + boolean enableOcsp, + String keyStoreType, Map.Entry, Object>... 
options) throws SSLException { + if (enableOcsp) { + throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); + } + return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslClientContext(sslContextProvider, + trustCert, trustManagerFactory, keyCertChain, key, keyPassword, + keyManagerFactory, ciphers, cipherFilter, apn, protocols, sessionCacheSize, + sessionTimeout, keyStoreType); + } + +} +@TargetClass(className = "io.netty.handler.ssl.JdkDefaultApplicationProtocolNegotiator") +final class Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator { + + @Alias + public static Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator INSTANCE; +} + +@TargetClass(className = "io.netty.handler.ssl.JdkSslContext") +final class Target_io_netty_handler_ssl_JdkSslContext { + + @Substitute + static JdkApplicationProtocolNegotiator toNegotiator(ApplicationProtocolConfig config, boolean isServer) { + if (config == null) { + return (JdkApplicationProtocolNegotiator) (Object) Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator.INSTANCE; + } + + switch (config.protocol()) { + case NONE: + return (JdkApplicationProtocolNegotiator) (Object) Target_io_netty_handler_ssl_JdkDefaultApplicationProtocolNegotiator.INSTANCE; + case ALPN: + if (isServer) { + // GRAAL RC9 bug: https://github.com/oracle/graal/issues/813 + // switch(config.selectorFailureBehavior()) { + // case FATAL_ALERT: + // return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + // case NO_ADVERTISE: + // return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + // default: + // throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + // .append(config.selectorFailureBehavior()).append(" failure behavior").toString()); + // } + SelectorFailureBehavior behavior = config.selectorFailureBehavior(); + if (behavior == 
SelectorFailureBehavior.FATAL_ALERT) { + return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + } else if (behavior == SelectorFailureBehavior.NO_ADVERTISE) { + return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + } else { + throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + .append(config.selectorFailureBehavior()).append(" failure behavior").toString()); + } + } else { + switch (config.selectedListenerFailureBehavior()) { + case ACCEPT: + return new JdkAlpnApplicationProtocolNegotiator(false, config.supportedProtocols()); + case FATAL_ALERT: + return new JdkAlpnApplicationProtocolNegotiator(true, config.supportedProtocols()); + default: + throw new UnsupportedOperationException(new StringBuilder("JDK provider does not support ") + .append(config.selectedListenerFailureBehavior()).append(" failure behavior") + .toString()); + } + } + default: + throw new UnsupportedOperationException( + new StringBuilder("JDK provider does not support ").append(config.protocol()) + .append(" protocol") + .toString()); + } + } + +} + +/* + * This one only prints exceptions otherwise we get a useless bogus + * exception message: https://github.com/eclipse-vertx/vert.x/issues/1657 + */ +@TargetClass(className = "io.netty.bootstrap.AbstractBootstrap") +final class Target_io_netty_bootstrap_AbstractBootstrap { + + @Alias + private ChannelFactory channelFactory; + + @Alias + void init(Channel channel) throws Exception { + } + + @Alias + public AbstractBootstrapConfig config() { + return null; + } + + @Substitute + final ChannelFuture initAndRegister() { + Channel channel = null; + try { + channel = channelFactory.newChannel(); + init(channel); + } catch (Throwable t) { + // THE FIX IS HERE: + t.printStackTrace(); + if (channel != null) { + // channel can be null if newChannel crashed (eg SocketException("too many open files")) + channel.unsafe().closeForcibly(); + } + // as the 
Channel is not registered yet, we need to force the usage of the GlobalEventExecutor + return new DefaultChannelPromise(channel, GlobalEventExecutor.INSTANCE).setFailure(t); + } + + ChannelFuture regFuture = config().group().register(channel); + if (regFuture.cause() != null) { + if (channel.isRegistered()) { + channel.close(); + } else { + channel.unsafe().closeForcibly(); + } + } + + // If we are here and the promise is not failed, it's one of the following cases: + // 1) If we attempted registration from the event loop, the registration has been completed at this point. + // i.e. It's safe to attempt bind() or connect() now because the channel has been registered. + // 2) If we attempted registration from the other thread, the registration request has been successfully + // added to the event loop's task queue for later execution. + // i.e. It's safe to attempt bind() or connect() now: + // because bind() or connect() will be executed *after* the scheduled registration task is executed + // because register(), bind(), and connect() are all bound to the same thread. 
+ + return regFuture; + + } +} + +@TargetClass(className = "io.netty.channel.nio.NioEventLoop") +final class Target_io_netty_channel_nio_NioEventLoop { + + @Substitute + private static Queue newTaskQueue0(int maxPendingTasks) { + return new LinkedBlockingDeque<>(); + } +} + +@TargetClass(className = "io.netty.buffer.AbstractReferenceCountedByteBuf") +final class Target_io_netty_buffer_AbstractReferenceCountedByteBuf { + + @Alias + @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") + private static long REFCNT_FIELD_OFFSET; +} + +@TargetClass(className = "io.netty.util.AbstractReferenceCounted") +final class Target_io_netty_util_AbstractReferenceCounted { + + @Alias + @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") + private static long REFCNT_FIELD_OFFSET; +} + +// This class is runtime-initialized by NettyProcessor +final class Holder_io_netty_util_concurrent_ScheduledFutureTask { + static final long START_TIME = System.nanoTime(); +} + +@TargetClass(className = "io.netty.util.concurrent.AbstractScheduledEventExecutor") +final class Target_io_netty_util_concurrent_AbstractScheduledEventExecutor { + + // The START_TIME field is kept but not used. + // All the accesses to it have been replaced with Holder_io_netty_util_concurrent_ScheduledFutureTask + + @Substitute + static long initialNanoTime() { + return Holder_io_netty_util_concurrent_ScheduledFutureTask.START_TIME; + } + + @Substitute + static long defaultCurrentTimeNanos() { + return System.nanoTime() - Holder_io_netty_util_concurrent_ScheduledFutureTask.START_TIME; + } +} + +@TargetClass(className = "io.netty.channel.ChannelHandlerMask") +final class Target_io_netty_channel_ChannelHandlerMask { + + // Netty tries to self-optimized itself, but it requires lots of reflection. We disable this behavior and avoid + // misleading DEBUG messages in the log. + @Substitute + private static boolean isSkippable(final Class handlerType, final String methodName, final Class... 
paramTypes) { + return false; + } +} + +@TargetClass(className = "io.netty.util.internal.NativeLibraryLoader") +final class Target_io_netty_util_internal_NativeLibraryLoader { + + // This method can trick GraalVM into thinking that Classloader#defineClass is getting called + @Substitute + static Class tryToLoadClass(final ClassLoader loader, final Class helper) + throws ClassNotFoundException { + return Class.forName(helper.getName(), false, loader); + } + +} + +@TargetClass(className = "io.netty.buffer.EmptyByteBuf") +final class Target_io_netty_buffer_EmptyByteBuf { + + @Alias + @RecomputeFieldValue(kind = Kind.Reset) + private static ByteBuffer EMPTY_BYTE_BUFFER; + + @Alias + @RecomputeFieldValue(kind = Kind.Reset) + private static long EMPTY_BYTE_BUFFER_ADDRESS; + + @Substitute + public ByteBuffer nioBuffer() { + return EmptyByteBufStub.emptyByteBuffer(); + } + + @Substitute + public ByteBuffer[] nioBuffers() { + return new ByteBuffer[] { EmptyByteBufStub.emptyByteBuffer() }; + } + + @Substitute + public ByteBuffer internalNioBuffer(int index, int length) { + return EmptyByteBufStub.emptyByteBuffer(); + } + + @Substitute + public boolean hasMemoryAddress() { + return EmptyByteBufStub.emptyByteBufferAddress() != 0; + } + + @Substitute + public long memoryAddress() { + if (hasMemoryAddress()) { + return EmptyByteBufStub.emptyByteBufferAddress(); + } else { + throw new UnsupportedOperationException(); + } + } + +} + +@TargetClass(className = "io.netty.handler.codec.http.HttpContentDecompressor") +final class Target_io_netty_handler_codec_http_HttpContentDecompressor { + + @Alias + private boolean strict; + + @Alias + protected ChannelHandlerContext ctx; + + @Substitute + protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Exception { + if (GZIP.contentEqualsIgnoreCase(contentEncoding) || + X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + 
ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP)); + } + if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || + X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + final ZlibWrapper wrapper = strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + // To be strict, 'deflate' means ZLIB, but some servers were not implemented correctly. + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); + } + + // 'identity' or unsupported + return null; + } +} + +@TargetClass(className = "io.netty.handler.codec.http2.DelegatingDecompressorFrameListener") +final class Target_io_netty_handler_codec_http2_DelegatingDecompressorFrameListener { + + @Alias + boolean strict; + + @Substitute + protected EmbeddedChannel newContentDecompressor(ChannelHandlerContext ctx, CharSequence contentEncoding) + throws Http2Exception { + if (!HttpHeaderValues.GZIP.contentEqualsIgnoreCase(contentEncoding) + && !HttpHeaderValues.X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + if (!HttpHeaderValues.DEFLATE.contentEqualsIgnoreCase(contentEncoding) + && !HttpHeaderValues.X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + return null; + } else { + ZlibWrapper wrapper = this.strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), + new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(wrapper) }); + } + } else { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), + new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP) }); + } + } +} + +@TargetClass(className = "io.netty.handler.ssl.SslHandler") +final class Target_SslHandler { + + @Substitute + private void setOpensslEngineSocketFd(Channel c) { + // do nothing. 
+ } +} + +@TargetClass(className = "io.netty.handler.ssl.PemReader") +final class Alias_PemReader { + + @Alias + public static ByteBuf readPrivateKey(File keyFile) { + return null; + } + + @Alias + public static ByteBuf readPrivateKey(InputStream in) throws KeyException { + return null; + } +} + +/** + * If BouncyCastle is not on the classpath, we must not try to read the PEM file using the BouncyCatle PEM reader. + */ +@TargetClass(className = "io.netty.handler.ssl.SslContext", onlyWith = IsBouncyNotThere.class) +final class Target_SslContext { + + @Substitute + protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, + NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, + KeyException, IOException { + if (keyFile == null) { + return null; + } + + return getPrivateKeyFromByteBuffer(Alias_PemReader.readPrivateKey(keyFile), keyPassword); + } + + @Substitute + protected static PrivateKey toPrivateKey(InputStream keyInputStream, String keyPassword) + throws NoSuchAlgorithmException, + NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, + KeyException, IOException { + if (keyInputStream == null) { + return null; + } + + return getPrivateKeyFromByteBuffer(Alias_PemReader.readPrivateKey(keyInputStream), keyPassword); + } + + @Alias + private static PrivateKey getPrivateKeyFromByteBuffer(ByteBuf encodedKeyBuf, String keyPassword) + throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, + InvalidAlgorithmParameterException, KeyException, IOException { + return null; + } +} + +class IsBouncyNotThere implements BooleanSupplier { + + @Override + public boolean getAsBoolean() { + try { + NettySubstitutions.class.getClassLoader().loadClass("org.bouncycastle.openssl.PEMParser"); + return false; + } catch (Exception e) { + return true; + } + } +} + +class NettySubstitutions { + +} diff --git 
a/test-functional/src/test-default/java/graal/netty/graal/ZLibSubstitutions.java b/test-functional/src/test-default/java/graal/netty/graal/ZLibSubstitutions.java new file mode 100644 index 000000000..7017aaa86 --- /dev/null +++ b/test-functional/src/test-default/java/graal/netty/graal/ZLibSubstitutions.java @@ -0,0 +1,66 @@ +package graal.netty.graal; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import io.netty.handler.codec.compression.*; + +/** + * This substitution avoid having jcraft zlib added to the build + */ +@TargetClass(className = "io.netty.handler.codec.compression.ZlibCodecFactory") +final class Target_io_netty_handler_codec_compression_ZlibCodecFactory { + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel) { + return new JdkZlibEncoder(compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper) { + return new JdkZlibEncoder(wrapper); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel) { + return new JdkZlibEncoder(wrapper, compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel, int windowBits, int memLevel) { + return new JdkZlibEncoder(wrapper, compressionLevel); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(byte[] dictionary) { + return new JdkZlibEncoder(dictionary); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel, byte[] dictionary) { + return new JdkZlibEncoder(compressionLevel, dictionary); + } + + @Substitute + public static ZlibEncoder newZlibEncoder(int compressionLevel, int windowBits, int memLevel, byte[] dictionary) { + return new JdkZlibEncoder(compressionLevel, dictionary); + } + + @Substitute + public static ZlibDecoder newZlibDecoder() { + return new JdkZlibDecoder(); + } + + @Substitute + public static ZlibDecoder 
newZlibDecoder(ZlibWrapper wrapper) { + return new JdkZlibDecoder(wrapper); + } + + @Substitute + public static ZlibDecoder newZlibDecoder(byte[] dictionary) { + return new JdkZlibDecoder(dictionary); + } +} + +class ZLibSubstitutions { + +} diff --git a/test-functional/src/test-default/java/graal/netty/package-info.java b/test-functional/src/test-default/java/graal/netty/package-info.java new file mode 100644 index 000000000..8b55354f7 --- /dev/null +++ b/test-functional/src/test-default/java/graal/netty/package-info.java @@ -0,0 +1,4 @@ +/** + * from io.quarkus:quarkus-netty:3.10.1 + */ +package graal.netty; diff --git a/test-functional/src/test-default/java/graal/vertx/graal/JdkSubstitutions.java b/test-functional/src/test-default/java/graal/vertx/graal/JdkSubstitutions.java new file mode 100644 index 000000000..579d7418f --- /dev/null +++ b/test-functional/src/test-default/java/graal/vertx/graal/JdkSubstitutions.java @@ -0,0 +1,98 @@ +package graal.vertx.graal; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.InjectAccessors; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; + +import java.io.IOException; +import java.net.URL; +import java.nio.channels.spi.AsynchronousChannelProvider; + +@TargetClass(className = "jdk.internal.loader.URLClassPath$Loader") +final class Target_URLClassPath$Loader { + + @Alias + public Target_URLClassPath$Loader(URL url) { + } +} + +@TargetClass(className = "jdk.internal.loader.URLClassPath$FileLoader") +final class Target_URLClassPath$FileLoader { + + @Alias + public Target_URLClassPath$FileLoader(URL url) throws IOException { + } +} + +@TargetClass(className = "jdk.internal.loader.URLClassPath") +final class Target_jdk_internal_loader_URLClassPath { + + @Substitute + private Target_URLClassPath$Loader getLoader(final URL url) throws IOException { + String file = 
url.getFile(); + if (file != null && file.endsWith("/")) { + if ("file".equals(url.getProtocol())) { + return (Target_URLClassPath$Loader) (Object) new Target_URLClassPath$FileLoader( + url); + } else { + return new Target_URLClassPath$Loader(url); + } + } else { + // that must be wrong, but JarLoader is deleted by SVM + return (Target_URLClassPath$Loader) (Object) new Target_URLClassPath$FileLoader( + url); + } + } + +} + +@Substitute +@TargetClass(className = "sun.nio.ch.WindowsAsynchronousFileChannelImpl", innerClass = "DefaultIocpHolder") +@Platforms({ Platform.WINDOWS.class }) +final class Target_sun_nio_ch_WindowsAsynchronousFileChannelImpl_DefaultIocpHolder { + + @Alias + @InjectAccessors(DefaultIocpAccessor.class) + static Target_sun_nio_ch_Iocp defaultIocp; +} + +@TargetClass(className = "sun.nio.ch.Iocp") +@Platforms({ Platform.WINDOWS.class }) +final class Target_sun_nio_ch_Iocp { + + @Alias + Target_sun_nio_ch_Iocp(AsynchronousChannelProvider provider, Target_sun_nio_ch_ThreadPool pool) throws IOException { + } + + @Alias + Target_sun_nio_ch_Iocp start() { + return null; + } +} + +@TargetClass(className = "sun.nio.ch.ThreadPool") +@Platforms({ Platform.WINDOWS.class }) +final class Target_sun_nio_ch_ThreadPool { + + @Alias + static Target_sun_nio_ch_ThreadPool createDefault() { + return null; + } +} + +final class DefaultIocpAccessor { + static Target_sun_nio_ch_Iocp get() { + try { + return new Target_sun_nio_ch_Iocp(null, Target_sun_nio_ch_ThreadPool.createDefault()).start(); + } catch (IOException ioe) { + throw new InternalError(ioe); + } + } +} + +class JdkSubstitutions { + +} diff --git a/test-functional/src/test-default/java/graal/vertx/graal/VertxSubstitutions.java b/test-functional/src/test-default/java/graal/vertx/graal/VertxSubstitutions.java new file mode 100644 index 000000000..d8ca211b6 --- /dev/null +++ b/test-functional/src/test-default/java/graal/vertx/graal/VertxSubstitutions.java @@ -0,0 +1,197 @@ +package graal.vertx.graal; + +import 
com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import io.netty.handler.ssl.*; +import io.vertx.core.MultiMap; +import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.dns.AddressResolverOptions; +import io.vertx.core.eventbus.EventBusOptions; +import io.vertx.core.eventbus.impl.HandlerHolder; +import io.vertx.core.eventbus.impl.HandlerRegistration; +import io.vertx.core.eventbus.impl.MessageImpl; +import io.vertx.core.eventbus.impl.OutboundDeliveryContext; +import io.vertx.core.impl.ContextInternal; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.resolver.DefaultResolverProvider; +import io.vertx.core.impl.transports.JDKTransport; +import io.vertx.core.net.NetServerOptions; +import io.vertx.core.spi.resolver.ResolverProvider; +import io.vertx.core.spi.transport.Transport; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; + +@TargetClass(className = "io.vertx.core.impl.VertxBuilder") +final class Target_io_vertx_core_impl_VertxBuilder { + @Substitute + public static Transport nativeTransport() { + return JDKTransport.INSTANCE; + } +} + +/** + * This substitution forces the usage of the blocking DNS resolver + */ +@TargetClass(className = "io.vertx.core.spi.resolver.ResolverProvider") +final class TargetResolverProvider { + + @Substitute + public static ResolverProvider factory(Vertx vertx, AddressResolverOptions options) { + return new DefaultResolverProvider(); + } +} + +@TargetClass(className = "io.vertx.core.net.OpenSSLEngineOptions") +final class Target_io_vertx_core_net_OpenSSLEngineOptions { + + @Substitute + public static boolean isAvailable() { + return false; + } + + @Substitute + public static boolean isAlpnAvailable() { + return 
false; + } +} + +@SuppressWarnings("rawtypes") +@TargetClass(className = "io.vertx.core.eventbus.impl.clustered.ClusteredEventBus") +final class Target_io_vertx_core_eventbus_impl_clustered_ClusteredEventBusClusteredEventBus { + + @Substitute + private NetServerOptions getServerOptions() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public void start(Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public void close(Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + public MessageImpl createMessage(boolean send, boolean isLocal, String address, MultiMap headers, Object body, + String codecName) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected void onLocalRegistration(HandlerHolder handlerHolder, Promise promise) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected HandlerHolder createHandlerHolder(HandlerRegistration registration, boolean replyHandler, + boolean localOnly, ContextInternal context) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected void onLocalUnregistration(HandlerHolder handlerHolder, Promise completionHandler) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected void sendOrPub(OutboundDeliveryContext sendContext) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected String generateReplyAddress() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + protected boolean isMessageLocal(MessageImpl msg) { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + ConcurrentMap connections() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + VertxInternal vertx() { + throw new RuntimeException("Not Implemented"); + } + + @Substitute + EventBusOptions options() { + throw new RuntimeException("Not Implemented"); + } +} + +@TargetClass(className = 
"io.vertx.core.spi.tls.DefaultSslContextFactory") +final class Target_DefaultSslContextFactory { + + @Alias + private Set enabledCipherSuites; + + @Alias + private List applicationProtocols; + + @Alias + private ClientAuth clientAuth; + + @Substitute + private SslContext createContext(boolean useAlpn, boolean client, KeyManagerFactory kmf, TrustManagerFactory tmf) + throws SSLException { + SslContextBuilder builder; + if (client) { + builder = SslContextBuilder.forClient(); + if (kmf != null) { + builder.keyManager(kmf); + } + } else { + builder = SslContextBuilder.forServer(kmf); + } + Collection cipherSuites = enabledCipherSuites; + builder.sslProvider(SslProvider.JDK); + if (cipherSuites == null || cipherSuites.isEmpty()) { + cipherSuites = Target_io_vertx_core_spi_tls_DefaultJDKCipherSuite.get(); + } + if (tmf != null) { + builder.trustManager(tmf); + } + if (cipherSuites != null && cipherSuites.size() > 0) { + builder.ciphers(cipherSuites); + } + if (useAlpn && applicationProtocols != null && applicationProtocols.size() > 0) { + builder.applicationProtocolConfig(new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + applicationProtocols)); + } + if (clientAuth != null) { + builder.clientAuth(clientAuth); + } + return builder.build(); + } +} + +@TargetClass(className = "io.vertx.core.spi.tls.DefaultJDKCipherSuite") +final class Target_io_vertx_core_spi_tls_DefaultJDKCipherSuite { + @Alias + static List get() { + return null; + } +} + +class VertxSubstitutions { + +} diff --git a/test-functional/src/test-default/java/graal/vertx/graal/package-info.java b/test-functional/src/test-default/java/graal/vertx/graal/package-info.java new file mode 100644 index 000000000..f6cb91e99 --- /dev/null +++ b/test-functional/src/test-default/java/graal/vertx/graal/package-info.java @@ -0,0 +1,4 @@ +/** + * from 
io.quarkus:quarkus-vertx:3.10.1 + */ +package graal.vertx.graal; diff --git a/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java b/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java new file mode 100644 index 000000000..b454c6111 --- /dev/null +++ b/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java @@ -0,0 +1,107 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.ArangoDBVersion; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import javax.net.ssl.SSLHandshakeException; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoSslTest extends BaseTest { + + @ParameterizedTest + @EnumSource(Protocol.class) + void connect(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void connectWithCertConf(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + 
.sslCertValue("MIIDezCCAmOgAwIBAgIEeDCzXzANBgkqhkiG9w0BAQsFADBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMjAxMTAxMTg1MTE5WhcNMzAxMDMwMTg1MTE5WjBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1WiDnd4+uCmMG539ZNZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMALX9euSseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7NzmvdogNXoJQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiRdJVPwUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf2MGO64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzbf8KnRY4PAgMBAAGjITAfMB0GA1UdDgQWBBTBrv9Awynt3C5IbaCNyOW5v4DNkTANBgkqhkiG9w0BAQsFAAOCAQEAIm9rPvDkYpmzpSIhR3VXG9Y71gxRDrqkEeLsMoEyqGnw/zx1bDCNeGg2PncLlW6zTIipEBooixIE9U7KxHgZxBy0Et6EEWvIUmnr6F4F+dbTD050GHlcZ7eOeqYTPYeQC502G1Fo4tdNi4lDP9L9XZpf7Q1QimRH2qaLS03ZFZa2tY7ah/RQqZL8Dkxx8/zc25sgTHVpxoK853glBVBs/ENMiyGJWmAXQayewY3EPt/9wGwV4KmU3dPDleQeXSUGPUISeQxFjy+jCw21pYviWVJTNBA9l5ny3GhEmcnOT/gQHCvVRLyGLMbaMZ4JrPwb+aAtBgrgeiK4xeSMMvrbhw==") + .verifyHost(false) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void connectWithFileProperties(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile("arangodb-ssl.properties")) + .protocol(protocol) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void connectWithoutValidSslContext(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 
8529) + .useSsl(true) + .build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException ex = (ArangoDBException) thrown; + assertThat(ex.getCause()).isInstanceOf(ArangoDBMultipleException.class); + List exceptions = ((ArangoDBMultipleException) ex.getCause()).getExceptions(); + exceptions.forEach(e -> assertThat(e).isInstanceOf(SSLHandshakeException.class)); + } + +} diff --git a/test-functional/src/test-ssl/java/com/arangodb/BaseTest.java b/test-functional/src/test-ssl/java/com/arangodb/BaseTest.java new file mode 100644 index 000000000..bfbb1903a --- /dev/null +++ b/test-functional/src/test-ssl/java/com/arangodb/BaseTest.java @@ -0,0 +1,57 @@ +package com.arangodb; + +import com.arangodb.entity.ArangoDBVersion; +import org.junit.jupiter.api.BeforeAll; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import java.security.KeyStore; + +abstract class BaseTest { + /*- + * a SSL trust store + * + * create the trust store for the self signed certificate: + * keytool -import -alias "my arangodb server cert" -file server.pem -keystore example.truststore + * + * Documentation: + * https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/conn/ssl/SSLSocketFactory.html + */ + private static final String SSL_TRUSTSTORE = "/example.truststore"; + private static final String SSL_TRUSTSTORE_PASSWORD = "12345678"; + static ArangoDBVersion version; + + @BeforeAll + static void fetchVersion() { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .build(); + version = adb.getVersion(); + adb.shutdown(); + } + + static SSLContext createSslContext() { + SSLContext sc; + try { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + 
ks.load(SslExampleTest.class.getResourceAsStream(SSL_TRUSTSTORE), SSL_TRUSTSTORE_PASSWORD.toCharArray()); + + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(ks, SSL_TRUSTSTORE_PASSWORD.toCharArray()); + + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ks); + + sc = SSLContext.getInstance("TLS"); + sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); + } catch (Exception e) { + throw new RuntimeException(e); + } + return sc; + } +} diff --git a/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java b/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java new file mode 100644 index 000000000..2e74a1fb0 --- /dev/null +++ b/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java @@ -0,0 +1,103 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.http.HttpProtocolConfig; +import io.netty.handler.proxy.ProxyConnectException; +import io.vertx.core.net.ProxyOptions; +import io.vertx.core.net.ProxyType; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * NB: excluded from shaded tests + */ +class HttpProxyTest extends BaseTest { + + @ParameterizedTest + @EnumSource(Protocol.class) + void httpProxy(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .protocolConfig(HttpProtocolConfig.builder() + .proxyOptions(new ProxyOptions() + .setType(ProxyType.HTTP) + .setHost("172.28.0.1") + .setPort(8888) + .setUsername("user") + .setPassword("password")) + .build()) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + + @ParameterizedTest + @EnumSource(Protocol.class) + void httpProxyWrongPassword(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .protocolConfig(HttpProtocolConfig.builder() + .proxyOptions(new ProxyOptions() + .setType(ProxyType.HTTP) + .setHost("172.28.0.1") + .setPort(8888) + .setUsername("user") + .setPassword("wrong")) + .build()) + .build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + 
assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("Cannot contact any host!") + .cause() + .isInstanceOf(ArangoDBMultipleException.class); + List causes = ((ArangoDBMultipleException) thrown.getCause()).getExceptions(); + assertThat(causes).allSatisfy(e -> assertThat(e) + .isInstanceOf(ProxyConnectException.class) + .hasMessageContaining("status: 401 Unauthorized")); + assertThat(version).isNotNull(); + } + +} diff --git a/test-functional/src/test-ssl/java/com/arangodb/SslExampleTest.java b/test-functional/src/test-ssl/java/com/arangodb/SslExampleTest.java new file mode 100644 index 000000000..ffaec30cb --- /dev/null +++ b/test-functional/src/test-ssl/java/com/arangodb/SslExampleTest.java @@ -0,0 +1,95 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ArangoDBVersion; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import utils.TestUtils; + +import javax.net.ssl.SSLHandshakeException; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class SslExampleTest extends BaseTest { + + @Disabled("Only local execution, in CircleCI port 8529 exposed to localhost") + @ParameterizedTest + @EnumSource(Protocol.class) + void connect(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || TestUtils.isLessThanVersion(version.getVersion(), 3, 12, 0)); + final ArangoDB arangoDB = new ArangoDB.Builder() + .host("localhost", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .protocol(protocol) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void noopHostnameVerifier(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || TestUtils.isLessThanVersion(version.getVersion(), 3, 12, 0)); + final ArangoDB arangoDB = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .protocol(protocol) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void hostnameVerifierFailure(Protocol protocol) { + assumeTrue(protocol != Protocol.VST, "VST does not support hostname verification"); + final ArangoDB arangoDB = new ArangoDB.Builder() + 
.host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(true) + .protocol(protocol) + .build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException ex = (ArangoDBException) thrown; + assertThat(ex.getCause()).isInstanceOf(ArangoDBMultipleException.class); + List exceptions = ((ArangoDBMultipleException) ex.getCause()).getExceptions(); + exceptions.forEach(e -> assertThat(e).isInstanceOf(SSLHandshakeException.class)); + } + + +} diff --git a/test-functional/src/test-ssl/java/utils/TestUtils.java b/test-functional/src/test-ssl/java/utils/TestUtils.java new file mode 100644 index 000000000..379cb4762 --- /dev/null +++ b/test-functional/src/test-ssl/java/utils/TestUtils.java @@ -0,0 +1,72 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package utils; + + +/** + * @author Michele Rastelli + */ +public final class TestUtils { + + private TestUtils() { + } + + /** + * Parses {@param version} and checks whether it is greater or equal to <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. 
+ */ + public static boolean isAtLeastVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) >= 0; + } + + /** + * Parses {@param version} and checks whether it is less than <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. + */ + public static boolean isLessThanVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) < 0; + } + + private static int compareVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + String[] parts = version.split("-")[0].split("\\."); + + int major = Integer.parseInt(parts[0]); + int minor = Integer.parseInt(parts[1]); + int patch = Integer.parseInt(parts[2]); + + int majorComparison = Integer.compare(major, otherMajor); + if (majorComparison != 0) { + return majorComparison; + } + + int minorComparison = Integer.compare(minor, otherMinor); + if (minorComparison != 0) { + return minorComparison; + } + + return Integer.compare(patch, otherPatch); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java new file mode 100644 index 000000000..77737ceef --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java @@ -0,0 +1,3703 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; +import com.arangodb.model.DocumentImportOptions.OnDuplicate; +import com.arangodb.serde.jackson.Id; +import com.arangodb.serde.jackson.JacksonSerde; +import com.arangodb.serde.jackson.Key; +import com.arangodb.serde.jackson.Rev; +import com.arangodb.util.*; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoCollectionAsyncTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "ArangoCollectionTest_collection"; + private 
static final String EDGE_COLLECTION_NAME = "ArangoCollectionTest_edge_collection"; + + private final ObjectMapper mapper = new ObjectMapper(); + + private static Stream asyncCols() { + return asyncDbsStream().map(mapNamedPayload(db -> db.collection(COLLECTION_NAME))).map(Arguments::of); + } + + private static Stream edges() { + return dbsStream().map(mapNamedPayload(db -> db.collection(EDGE_COLLECTION_NAME))).map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + initEdgeCollections(EDGE_COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocument(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + assertThat(doc.getId()).isEqualTo(COLLECTION_NAME + "/" + doc.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentWithArrayWithNullValues(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + List arr = Arrays.asList("a", null); + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("arr", arr); + + final DocumentCreateEntity insertedDoc = collection.insertDocument(doc, + new DocumentCreateOptions().returnNew(true)).get(); + assertThat(insertedDoc).isNotNull(); + assertThat(insertedDoc.getId()).isNotNull(); + assertThat(insertedDoc.getKey()).isNotNull(); + assertThat(insertedDoc.getRev()).isNotNull(); + assertThat(insertedDoc.getId()).isEqualTo(COLLECTION_NAME + "/" + insertedDoc.getKey()); + //noinspection unchecked + assertThat((List) insertedDoc.getNew().getAttribute("arr")).containsAll(Arrays.asList("a", null)); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
insertDocumentWithNullValues(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("null", null); + + final DocumentCreateEntity insertedDoc = collection.insertDocument(doc, + new DocumentCreateOptions().returnNew(true)).get(); + assertThat(insertedDoc).isNotNull(); + assertThat(insertedDoc.getId()).isNotNull(); + assertThat(insertedDoc.getKey()).isNotNull(); + assertThat(insertedDoc.getRev()).isNotNull(); + assertThat(insertedDoc.getId()).isEqualTo(COLLECTION_NAME + "/" + insertedDoc.getKey()); + assertThat(insertedDoc.getNew().getProperties()).containsKey("null"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentUpdateRev(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentWithTypeOverwriteModeReplace(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + assumeTrue(collection.getSerde().getUserSerde() instanceof JacksonSerde, "polymorphic deserialization support" 
+ + " required"); + + String key = UUID.randomUUID().toString(); + Dog dog = new Dog(key, "Teddy"); + Cat cat = new Cat(key, "Luna"); + + final DocumentCreateOptions options = new DocumentCreateOptions() + .returnNew(true) + .returnOld(true) + .overwriteMode(OverwriteMode.replace); + collection.insertDocument(dog, options).get(); + final DocumentCreateEntity doc = collection.insertDocument(cat, options, Animal.class).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull().isEqualTo(key); + assertThat(doc.getRev()).isNotNull(); + + assertThat(doc.getOld()) + .isNotNull() + .isInstanceOf(Dog.class); + assertThat(doc.getOld().getKey()).isEqualTo(key); + assertThat(doc.getOld().getName()).isEqualTo("Teddy"); + + assertThat(doc.getNew()) + .isNotNull() + .isInstanceOf(Cat.class); + assertThat(doc.getNew().getKey()).isEqualTo(key); + assertThat(doc.getNew().getName()).isEqualTo("Luna"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeIgnore(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc).get(); + + final BaseDocument doc2 = new BaseDocument(key); + doc2.addAttribute("bar", "b"); + final DocumentCreateEntity insertIgnore = collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.ignore)).get(); + + assertThat(insertIgnore).isNotNull(); + assertThat(insertIgnore.getRev()).isEqualTo(meta.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeConflict(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final 
BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + collection.insertDocument(doc).get(); + + final BaseDocument doc2 = new BaseDocument(key); + Throwable thrown = catchThrowable(() -> collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.conflict)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeReplace(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc).get(); + + final BaseDocument doc2 = new BaseDocument(key); + doc2.addAttribute("bar", "b"); + final DocumentCreateEntity repsert = collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.replace).returnNew(true)).get(); + + assertThat(repsert).isNotNull(); + assertThat(repsert.getRev()).isNotEqualTo(meta.getRev()); + assertThat(repsert.getNew().getProperties().containsKey("foo")).isFalse(); + assertThat(repsert.getNew().getAttribute("bar")).isEqualTo("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeUpdate(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc).get(); + + doc.addAttribute("bar", "b"); + final DocumentCreateEntity updated = collection.insertDocument(doc, + new 
DocumentCreateOptions().overwriteMode(OverwriteMode.update).returnNew(true)).get(); + + assertThat(updated).isNotNull(); + assertThat(updated.getRev()).isNotEqualTo(meta.getRev()); + assertThat(updated.getNew().getAttribute("foo")).isEqualTo("a"); + assertThat(updated.getNew().getAttribute("bar")).isEqualTo("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeUpdateMergeObjectsFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + Map fieldA = Collections.singletonMap("a", "a"); + doc.addAttribute("foo", fieldA); + final DocumentCreateEntity meta = collection.insertDocument(doc).get(); + + Map fieldB = Collections.singletonMap("b", "b"); + doc.addAttribute("foo", fieldB); + final DocumentCreateEntity updated = collection.insertDocument(doc, + new DocumentCreateOptions().overwriteMode(OverwriteMode.update).mergeObjects(false).returnNew(true)).get(); + + assertThat(updated).isNotNull(); + assertThat(updated.getRev()).isNotEqualTo(meta.getRev()); + assertThat(updated.getNew().getAttribute("foo")).isEqualTo(fieldB); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeUpdateKeepNullTrue(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "bar"); + collection.insertDocument(doc).get(); + + doc.updateAttribute("foo", null); + final BaseDocument updated = collection.insertDocument(doc, new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .keepNull(true) + .returnNew(true)).get().getNew(); + + assertThat(updated.getProperties()).containsEntry("foo", null); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
insertDocumentOverwriteModeUpdateKeepNullFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "bar"); + collection.insertDocument(doc).get(); + + doc.updateAttribute("foo", null); + final BaseDocument updated = collection.insertDocument(doc, new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .keepNull(false) + .returnNew(true)).get().getNew(); + + assertThat(updated.getProperties()).doesNotContainKey("foo"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeUpdateWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 2); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true) + ).get(); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeUpdateWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 0); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true) + ).get(); + 
assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOverwriteModeUpdateWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOverwriteModeUpdateWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + 
assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeReplaceWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 2); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true) + ).get(); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentOverwriteModeReplaceUpdateWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 0); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true) + ).get(); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOverwriteModeReplaceWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + 
collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOverwriteModeReplaceWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentWaitForSync(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + 
assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateOptions options = new DocumentCreateOptions().refillIndexCaches(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentAsJson(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = "doc-" + UUID.randomUUID(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\",\"a\":\"test\"}"); + final DocumentCreateEntity doc = collection.insertDocument(rawJson).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isEqualTo(collection.name() + "/" + key); + assertThat(doc.getKey()).isEqualTo(key); + assertThat(doc.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentAsBytes(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = "doc-" + UUID.randomUUID(); + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("a", "test"); + byte[] bytes = collection.getSerde().serializeUserData(doc); + RawBytes rawJson = RawBytes.of(bytes); + final DocumentCreateEntity createEntity = collection.insertDocument(rawJson, + new DocumentCreateOptions().returnNew(true)).get(); + assertThat(createEntity).isNotNull(); + assertThat(createEntity.getId()).isEqualTo(collection.name() + "/" + key); + assertThat(createEntity.getKey()).isEqualTo(key); + assertThat(createEntity.getRev()).isNotNull(); + 
assertThat(createEntity.getNew()).isNotNull().isInstanceOf(RawBytes.class); + Map newDoc = collection.getSerde().getUserSerde().deserialize(createEntity.getNew().get(), Map.class); + assertThat(newDoc).containsAllEntriesOf(doc); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity meta = collection.insertDocument(new BaseDocument(), + new DocumentCreateOptions().silent(true)).get(); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentSilentDontTouchInstance(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final String key = "testkey-" + UUID.randomUUID(); + doc.setKey(key); + final DocumentCreateEntity meta = collection.insertDocument(doc, + new DocumentCreateOptions().silent(true)).get(); + assertThat(meta).isNotNull(); + assertThat(meta.getKey()).isNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final MultiDocumentEntity> info = + collection.insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().silent(true), BaseDocument.class).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsRefillIndexCaches(ArangoCollectionAsync collection) 
throws ExecutionException, InterruptedException { + final MultiDocumentEntity> info = + collection.insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().refillIndexCaches(true), BaseDocument.class).get(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocument(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(createResult.getKey()).isNotNull(); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentIfMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifMatch(createResult.getRev()); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, options).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentIfMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifMatch("no"); + final BaseDocument document = 
collection.getDocument(createResult.getKey(), BaseDocument.class, options).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentIfNoneMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch("no"); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, options).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentIfNoneMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null).get(); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch(createResult.getRev()); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, options).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentAsJson(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\",\"a\":\"test\"}"); + collection.insertDocument(rawJson).get(); + final RawJson readResult = collection.getDocument(key, RawJson.class).get(); + assertThat(readResult.get()).contains("\"_key\":\"" + key + "\"").contains("\"_id\":\"" + COLLECTION_NAME + "/" + key + "\""); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentNotFound(ArangoCollectionAsync 
collection) throws ExecutionException, InterruptedException { + final BaseDocument document = collection.getDocument("no", BaseDocument.class).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentNotFoundOptionsDefault(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument document = collection.getDocument("no", BaseDocument.class, new DocumentReadOptions()).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentNotFoundOptionsNull(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument document = collection.getDocument("no", BaseDocument.class, null).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentWrongKey(ArangoCollectionAsync collection) { + Throwable thrown = catchThrowable(() -> collection.getDocument("no/no", BaseDocument.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentDirtyRead(ArangoCollectionAsync collection) throws InterruptedException, ExecutionException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + collection.insertDocument(doc, new DocumentCreateOptions()); + Thread.sleep(2000); + final RawJson document = collection.getDocument(doc.getKey(), RawJson.class, + new DocumentReadOptions().allowDirtyRead(true)).get(); + assertThat(document).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + values.add(new BaseDocument("3")); + collection.insertDocuments(values).get(); + final 
MultiDocumentEntity documents = collection.getDocuments(Arrays.asList("1", "2", "3"), + BaseDocument.class).get(); + assertThat(documents).isNotNull(); + assertThat(documents.getDocuments()).hasSize(3); + for (final BaseDocument document : documents.getDocuments()) { + assertThat(document.getId()).isIn(COLLECTION_NAME + "/" + "1", COLLECTION_NAME + "/" + "2", + COLLECTION_NAME + "/" + "3"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentsWithCustomShardingKey(ArangoCollectionAsync c) throws ExecutionException, InterruptedException { + ArangoCollectionAsync collection = c.db().collection("customShardingKeyCollection"); + if (collection.exists().get()) collection.drop().get(); + + collection.create(new CollectionCreateOptions().shardKeys("customField").numberOfShards(10)).get(); + + List values = + IntStream.range(0, 10).mapToObj(String::valueOf).map(key -> new BaseDocument()).peek(it -> it.addAttribute( + "customField", rnd())).collect(Collectors.toList()); + + MultiDocumentEntity> inserted = collection.insertDocuments(values).get(); + List insertedKeys = + inserted.getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + final Collection documents = + collection.getDocuments(insertedKeys, BaseDocument.class).get().getDocuments(); + + assertThat(documents).hasSize(10); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentsDirtyRead(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); // skip activefailover + final Collection values = new ArrayList<>(); + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + values.add(new BaseDocument("3")); + collection.insertDocuments(values).get(); + final MultiDocumentEntity documents = collection.getDocuments(Arrays.asList("1", "2", "3"), + BaseDocument.class, new DocumentReadOptions().allowDirtyRead(true)).get(); + assertThat(documents).isNotNull(); + if 
(isAtLeastVersion(3, 10)) { + assertThat(documents.isPotentialDirtyRead()).isTrue(); + } + assertThat(documents.getDocuments()).hasSize(3); + for (final BaseDocument document : documents.getDocuments()) { + assertThat(document.getId()).isIn(COLLECTION_NAME + "/" + "1", COLLECTION_NAME + "/" + "2", + COLLECTION_NAME + "/" + "3"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentsNotFound(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final MultiDocumentEntity readResult = collection.getDocuments(Collections.singleton("no"), + BaseDocument.class).get(); + assertThat(readResult).isNotNull(); + assertThat(readResult.getDocuments()).isEmpty(); + assertThat(readResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getDocumentsWrongKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final MultiDocumentEntity readResult = collection.getDocuments(Collections.singleton("no/no"), + BaseDocument.class).get(); + assertThat(readResult).isNotNull(); + assertThat(readResult.getDocuments()).isEmpty(); + assertThat(readResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocument(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + null).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getNew()).isNull(); + 
assertThat(updateResult.getOld()).isNull(); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentWithDifferentReturnType(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("a", "test"); + collection.insertDocument(doc).get(); + + final DocumentUpdateEntity updateResult = collection.updateDocument(key, + Collections.singletonMap("b", "test"), new DocumentUpdateOptions().returnNew(true), BaseDocument.class).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(key); + BaseDocument updated = updateResult.getNew(); + assertThat(updated).isNotNull(); + assertThat(updated.getAttribute("a")).isEqualTo("test"); + assertThat(updated.getAttribute("b")).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentUpdateRev(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.addAttribute("foo", "bar"); + final 
DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentIfMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch(createResult.getRev()); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentIfMatchFail(ArangoCollectionAsync collection) throws ExecutionException, 
InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + + final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.updateDocument(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 2); + DocumentUpdateEntity updateResult = collection.updateDocument( + doc.getKey(), + doc, + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true) + ).get(); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 0); + DocumentUpdateEntity updateResult = collection.updateDocument( + doc.getKey(), + doc, + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true) + ).get(); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
updateDocumentsWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.updateDocuments( + Arrays.asList(d1, d2), + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.updateDocuments( + Arrays.asList(d1, d2), + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ).get(); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument 
doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + final DocumentUpdateOptions options = new DocumentUpdateOptions().returnNew(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(updateResult.getNew()).isNotNull(); + assertThat(updateResult.getNew().getKey()).isEqualTo(createResult.getKey()); + assertThat(updateResult.getNew().getRevision()).isNotEqualTo(createResult.getRev()); + assertThat(updateResult.getNew().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(updateResult.getNew().getAttribute("a"))).isEqualTo("test1"); + assertThat(updateResult.getNew().getAttribute("b")).isNotNull(); + assertThat(String.valueOf(updateResult.getNew().getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentReturnOld(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + final DocumentUpdateOptions options = new DocumentUpdateOptions().returnOld(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + 
assertThat(updateResult.getOld()).isNotNull(); + assertThat(updateResult.getOld().getKey()).isEqualTo(createResult.getKey()); + assertThat(updateResult.getOld().getRevision()).isEqualTo(createResult.getRev()); + assertThat(updateResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(updateResult.getOld().getAttribute("a"))).isEqualTo("test"); + assertThat(updateResult.getOld().getProperties().keySet()).doesNotContain("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentKeepNullTrue(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", null); + final DocumentUpdateOptions options = new DocumentUpdateOptions().keepNull(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentKeepNullFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", null); + final DocumentUpdateOptions options = new 
DocumentUpdateOptions().keepNull(false); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentSerializeNullTrue(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final TestUpdateEntity doc = new TestUpdateEntity(); + doc.a = "foo"; + doc.b = "foo"; + final DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + final TestUpdateEntity patchDoc = new TestUpdateEntity(); + patchDoc.a = "bar"; + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), patchDoc).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(createResult.getKey()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties()).containsKey("a"); + assertThat(readResult.getAttribute("a")).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentSerializeNullFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final TestUpdateEntitySerializeNullFalse doc = new 
TestUpdateEntitySerializeNullFalse(); + doc.a = "foo"; + doc.b = "foo"; + final DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + final TestUpdateEntitySerializeNullFalse patchDoc = new TestUpdateEntitySerializeNullFalse(); + patchDoc.a = "bar"; + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), patchDoc).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(createResult.getKey()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties()).containsKeys("a", "b"); + assertThat(readResult.getAttribute("a")).isEqualTo("bar"); + assertThat(readResult.getAttribute("b")).isEqualTo("foo"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentMergeObjectsTrue(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final Map a = new HashMap<>(); + a.put("a", "test"); + doc.addAttribute("a", a); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + a.clear(); + a.put("b", "test"); + doc.updateAttribute("a", a); + final DocumentUpdateOptions options = new DocumentUpdateOptions().mergeObjects(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + final 
Object aResult = readResult.getAttribute("a"); + assertThat(aResult).isInstanceOf(Map.class); + final Map aMap = (Map) aResult; + assertThat(aMap).containsKeys("a", "b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentMergeObjectsFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final Map a = new HashMap<>(); + a.put("a", "test"); + doc.addAttribute("a", a); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + a.clear(); + a.put("b", "test"); + doc.updateAttribute("a", a); + final DocumentUpdateOptions options = new DocumentUpdateOptions().mergeObjects(false); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + final Object aResult = readResult.getAttribute("a"); + assertThat(aResult).isInstanceOf(Map.class); + final Map aMap = (Map) aResult; + assertThat(aMap.keySet()).doesNotContain("a"); + assertThat(aMap).containsKey("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentIgnoreRevsFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.setRevision("no"); + + final DocumentUpdateOptions 
options = new DocumentUpdateOptions().ignoreRevs(false); + Throwable thrown = catchThrowable(() -> collection.updateDocument(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final DocumentUpdateEntity meta = collection.updateDocument(createResult.getKey(), + new BaseDocument(), new DocumentUpdateOptions().silent(true)).get(); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final MultiDocumentEntity> info = + collection.updateDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentUpdateOptions().silent(true), BaseDocument.class).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateNonExistingDocument(ArangoCollectionAsync collection) { + final BaseDocument doc = new BaseDocument("test-" + rnd()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + + Throwable thrown = catchThrowable(() -> collection.updateDocument(doc.getKey(), doc, null).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + 
assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1202); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentPreconditionFailed(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument("test-" + rnd()); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + + doc.updateAttribute("foo", "b"); + collection.updateDocument(doc.getKey(), doc, null).get(); + + doc.updateAttribute("foo", "c"); + Throwable thrown = catchThrowable(() -> collection.updateDocument(doc.getKey(), doc, + new DocumentUpdateOptions().ifMatch(createResult.getRev())).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + BaseDocument readDocument = collection.getDocument(doc.getKey(), BaseDocument.class).get(); + assertThat(readDocument.getAttribute("foo")).isEqualTo("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + BaseDocument doc = new BaseDocument(); + DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + doc.addAttribute("foo", "bar"); + DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), + doc, new DocumentUpdateOptions().refillIndexCaches(true)).get(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final 
MultiDocumentEntity> info = + collection.updateDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentUpdateOptions().refillIndexCaches(true), BaseDocument.class).get(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocument(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, null).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getNew()).isNull(); + assertThat(replaceResult.getOld()).isNull(); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentUpdateRev(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + final DocumentUpdateEntity replaceResult = 
collection.replaceDocument(createResult.getKey(), + doc, null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentIfMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentReplaceOptions options = new DocumentReplaceOptions().ifMatch(createResult.getRev()); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentIfMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + 
doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + + final DocumentReplaceOptions options = new DocumentReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.replaceDocument(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentIgnoreRevsFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + doc.setRevision("no"); + + final DocumentReplaceOptions options = new DocumentReplaceOptions().ignoreRevs(false); + Throwable thrown = catchThrowable(() -> collection.replaceDocument(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 2); + DocumentUpdateEntity replaceResult = collection.replaceDocument( + doc.getKey(), + doc, + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true) + ).get(); + assertThat(replaceResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new 
BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc).get(); + doc.addAttribute("_version", 0); + DocumentUpdateEntity replaceResult = collection.replaceDocument( + doc.getKey(), + doc, + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true) + ).get(); + assertThat(replaceResult.getNew().getAttribute("_version")).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsWithExternalVersioning(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> replaceResult = collection.replaceDocuments( + Arrays.asList(d1, d2), + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ).get(); + + assertThat(replaceResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsWithExternalVersioningFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)).get(); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> replaceResult = collection.replaceDocuments( + Arrays.asList(d1, 
d2), + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ).get(); + + assertThat(replaceResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentReplaceOptions options = new DocumentReplaceOptions().returnNew(true); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getNew()).isNotNull(); + assertThat(replaceResult.getNew().getKey()).isEqualTo(createResult.getKey()); + assertThat(replaceResult.getNew().getRevision()).isNotEqualTo(createResult.getRev()); + assertThat(replaceResult.getNew().getProperties().keySet()).doesNotContain("a"); + assertThat(replaceResult.getNew().getAttribute("b")).isNotNull(); + assertThat(String.valueOf(replaceResult.getNew().getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentReturnOld(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + 
final DocumentReplaceOptions options = new DocumentReplaceOptions().returnOld(true); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getOld()).isNotNull(); + assertThat(replaceResult.getOld().getKey()).isEqualTo(createResult.getKey()); + assertThat(replaceResult.getOld().getRevision()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(replaceResult.getOld().getAttribute("a"))).isEqualTo("test"); + assertThat(replaceResult.getOld().getProperties().keySet()).doesNotContain("b"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final DocumentUpdateEntity meta = collection.replaceDocument(createResult.getKey(), + new BaseDocument(), new DocumentReplaceOptions().silent(true)).get(); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentSilentDontTouchInstance(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + final DocumentUpdateEntity meta = collection.replaceDocument(createResult.getKey(), doc, + new DocumentReplaceOptions().silent(true)).get(); + assertThat(meta.getRev()).isNull(); 
+ assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final MultiDocumentEntity> info = + collection.replaceDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentReplaceOptions().silent(true), BaseDocument.class).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), doc, + new DocumentReplaceOptions().refillIndexCaches(true)).get(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final MultiDocumentEntity> info = + collection.replaceDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentReplaceOptions().refillIndexCaches(true), BaseDocument.class).get(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocument(ArangoCollectionAsync 
collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + collection.deleteDocument(createResult.getKey()).get(); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentReturnOld(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + final DocumentDeleteOptions options = new DocumentDeleteOptions().returnOld(true); + final DocumentDeleteEntity deleteResult = collection.deleteDocument(createResult.getKey(), + options, BaseDocument.class).get(); + assertThat(deleteResult.getOld()).isNotNull(); + assertThat(deleteResult.getOld()).isInstanceOf(BaseDocument.class); + assertThat(deleteResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(deleteResult.getOld().getAttribute("a"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentIfMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null).get(); + final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch(createResult.getRev()); + collection.deleteDocument(createResult.getKey(), options).get(); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
deleteDocumentIfMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc).get(); + final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.deleteDocument(createResult.getKey(), options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + DocumentCreateEntity a = collection.insertDocument(new BaseDocument()).get(); + DocumentCreateEntity b = collection.insertDocument(new BaseDocument()).get(); + MultiDocumentEntity> info = collection.deleteDocuments( + Arrays.asList(a.getKey(), b.getKey())).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).hasSize(2); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsWithRevs(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + DocumentCreateEntity a = collection.insertDocument(new BaseDocument()).get(); + DocumentCreateEntity b = collection.insertDocument(new BaseDocument()).get(); + MultiDocumentEntity> info = collection.deleteDocuments( + Arrays.asList( + JsonNodeFactory.instance.objectNode() + .put("_key", a.getKey()) + .put("_rev", a.getRev()), + JsonNodeFactory.instance.objectNode() + .put("_key", b.getKey()) + .put("_rev", "wrong") + ), new DocumentDeleteOptions().ignoreRevs(false)).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).hasSize(1); + assertThat(info.getDocuments().get(0).getKey()).isEqualTo(a.getKey()); + assertThat(info.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
deleteDocumentSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final DocumentDeleteEntity meta = collection.deleteDocument(createResult.getKey(), + new DocumentDeleteOptions().silent(true), BaseDocument.class).get(); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsSilent(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + final MultiDocumentEntity> info = collection.deleteDocuments( + Collections.singletonList(createResult.getKey()), + new DocumentDeleteOptions().silent(true), + BaseDocument.class).get(); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()).get(); + DocumentDeleteEntity deleteResult = collection.deleteDocument(createResult.getKey(), + new DocumentDeleteOptions().refillIndexCaches(true)).get(); + assertThat(deleteResult.getRev()) + .isNotNull() + .isEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsRefillIndexCaches(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new 
BaseDocument()).get(); + final MultiDocumentEntity> info = collection.deleteDocuments( + Collections.singletonList(createResult.getKey()), + new DocumentDeleteOptions().refillIndexCaches(true), + BaseDocument.class).get(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null).get(); + final IndexEntity readResult = collection.getIndex(createResult.getId()).get(); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getIndexByKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null).get(); + final IndexEntity readResult = collection.getIndex(createResult.getId().split("/")[1]).get(); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null).get(); + final String id = collection.deleteIndex(createResult.getId()).get(); + assertThat(id).isEqualTo(createResult.getId()); + Throwable thrown = catchThrowable(() -> collection.db().getIndex(id).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
deleteIndexByKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null).get(); + final String id = collection.deleteIndex(createResult.getId().split("/")[1]).get(); + assertThat(id).isEqualTo(createResult.getId()); + Throwable thrown = catchThrowable(() -> collection.db().getIndex(id).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createGeoIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getLegacyPolygons()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createGeoIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, 
options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + assertThat(indexResult.getName()).isEqualTo(name); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getLegacyPolygons()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createGeoIndexLegacyPolygons(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + options.legacyPolygons(true); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + assertThat(indexResult.getName()).isEqualTo(name); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getLegacyPolygons()).isTrue(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
createGeo2Index(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureGeoIndex(fields, null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo2); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createGeo2IndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureGeoIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo2); + } + 
assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createPersistentIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getCacheEnabled()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createPersistentIndexCacheEnabled(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, new PersistentIndexOptions().cacheEnabled(true)).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + 
assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + assertThat(indexResult.getCacheEnabled()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createPersistentIndexStoredValues(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, new PersistentIndexOptions().storedValues("v1", "v2")).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + assertThat(indexResult.getCacheEnabled()).isFalse(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createPersistentIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options).get(); + 
assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createZKDIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 9)); + collection.truncate().get(); + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureZKDIndex(fields, null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.zkd); + assertThat(indexResult.getUnique()).isFalse(); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createZKDIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 9)); + collection.truncate().get(); + + String name = "ZKDIndex-" + rnd(); + final ZKDIndexOptions options = + new ZKDIndexOptions().name(name).fieldValueTypes(ZKDIndexOptions.FieldValueTypes.DOUBLE); + + String f1 = "field-" + rnd(); + String f2 = "field-" 
+ rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensureZKDIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.zkd); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + collection.deleteIndex(indexResult.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createMDIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate().get(); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDIndex(Arrays.asList(f1, f2), null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1, f2); + assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdi); + assertThat(indexResult.getUnique()).isFalse(); + collection.deleteIndex(indexResult.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createMDIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate().get(); + + String name = "MDIndex-" + rnd(); + final MDIndexOptions options 
= new MDIndexOptions() + .name(name) + .unique(false) + .fieldValueTypes(MDIFieldValueTypes.DOUBLE) + .estimates(false) + .sparse(true) + .storedValues(Arrays.asList("v1", "v2")); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDIndex(Arrays.asList(f1, f2), options).get(); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdi); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getName()).isEqualTo(name); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getEstimates()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + assertThat(indexResult.getFields()).contains(f1, f2); + assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + collection.deleteIndex(indexResult.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createMDPrefixedIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate().get(); + + String name = "MDPrefixedIndex-" + rnd(); + final MDPrefixedIndexOptions options = new MDPrefixedIndexOptions() + .name(name) + .unique(false) + .fieldValueTypes(MDIFieldValueTypes.DOUBLE) + .estimates(false) + .sparse(true) + .storedValues(Arrays.asList("v1", "v2")) + .prefixFields(Arrays.asList("p1", "p2")); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDPrefixedIndex(Arrays.asList(f1, f2), options).get(); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdiPrefixed); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getName()).isEqualTo(name); + assertThat(indexResult.getUnique()).isFalse(); + 
assertThat(indexResult.getEstimates()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + assertThat(indexResult.getFields()).contains(f1, f2); + assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getPrefixFields()).contains("p1", "p2"); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + collection.deleteIndex(indexResult.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void indexEstimates(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + assumeTrue(isSingleServer()); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.estimates(true); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getEstimates()).isTrue(); + assertThat(indexResult.getSelectivityEstimate()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void indexEstimatesFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + assumeTrue(isSingleServer()); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.estimates(false); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getEstimates()).isFalse(); + 
assertThat(indexResult.getSelectivityEstimate()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void indexDeduplicate(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.deduplicate(true); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getDeduplicate()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void indexDeduplicateFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.deduplicate(false); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getDeduplicate()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createFulltextIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureFulltextIndex(fields, null).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + 
assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.fulltext); + assertThat(indexResult.getUnique()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createFulltextIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "fulltextIndex-" + rnd(); + final FulltextIndexOptions options = new FulltextIndexOptions(); + options.name(name); + + String f = "field-" + rnd(); + final Collection fields = Collections.singletonList(f); + final IndexEntity indexResult = collection.ensureFulltextIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.fulltext); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createTtlIndexWithoutOptions(ArangoCollectionAsync collection) { + assumeTrue(isAtLeastVersion(3, 5)); + final Collection fields = new ArrayList<>(); + fields.add("a"); + + Throwable thrown = catchThrowable(() -> collection.ensureTtlIndex(fields, null).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(400); + assertThat(e.getErrorNum()).isEqualTo(10); + assertThat(e.getMessage()).contains("expireAfter attribute must be a number"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createTtlIndexWithOptions(ArangoCollectionAsync collection) throws ExecutionException, 
InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + + String name = "ttlIndex-" + rnd(); + final TtlIndexOptions options = new TtlIndexOptions(); + options.name(name); + options.expireAfter(3600); + + final IndexEntity indexResult = collection.ensureTtlIndex(fields, options).get(); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.ttl); + assertThat(indexResult.getExpireAfter()).isEqualTo(3600); + assertThat(indexResult.getName()).isEqualTo(name); + + // revert changes + collection.deleteIndex(indexResult.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getIndexes(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + collection.ensurePersistentIndex(fields, null).get(); + long matchingIndexes = + collection.getIndexes().get().stream().filter(i -> i.getType() == IndexType.persistent).filter(i -> i.getFields().contains(f1)).count(); + assertThat(matchingIndexes).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("edges") + void getEdgeIndex(ArangoCollection edgeCollection) { + Collection indexes = edgeCollection.getIndexes(); + long primaryIndexes = indexes.stream().filter(i -> i.getType() == IndexType.primary).count(); + long edgeIndexes = indexes.stream().filter(i -> i.getType() == IndexType.primary).count(); + assertThat(primaryIndexes).isEqualTo(1L); + assertThat(edgeIndexes).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void exists(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assertThat(collection.exists().get()).isTrue(); + 
assertThat(collection.db().collection(COLLECTION_NAME + "no").exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void truncate(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + collection.insertDocument(doc, null).get(); + final BaseDocument readResult = collection.getDocument(doc.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(doc.getKey()); + final CollectionEntity truncateResult = collection.truncate().get(); + assertThat(truncateResult).isNotNull(); + assertThat(truncateResult.getId()).isNotNull(); + final BaseDocument document = collection.getDocument(doc.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getCount(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + Long initialCount = collection.count().get().getCount(); + collection.insertDocument(RawJson.of("{}")).get(); + final CollectionPropertiesEntity count = collection.count().get(); + assertThat(count.getCount()).isEqualTo(initialCount + 1L); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void documentExists(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Boolean existsNot = collection.documentExists(rnd(), null).get(); + assertThat(existsNot).isFalse(); + + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson).get(); + final Boolean exists = collection.documentExists(key, null).get(); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void documentExistsIfMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + final 
DocumentCreateEntity createResult = collection.insertDocument(rawJson).get(); + final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch(createResult.getRev()); + final Boolean exists = collection.documentExists(key, options).get(); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void documentExistsIfMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson).get(); + final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch("no"); + final Boolean exists = collection.documentExists(key, options).get(); + assertThat(exists).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void documentExistsIfNoneMatch(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson).get(); + final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch("no"); + final Boolean exists = collection.documentExists(key, options).get(); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void documentExistsIfNoneMatchFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + final DocumentCreateEntity createResult = collection.insertDocument(rawJson).get(); + final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch(createResult.getRev()); + final Boolean exists = collection.documentExists(key, options).get(); + assertThat(exists).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection 
values = Arrays.asList(new BaseDocument(), new BaseDocument(), + new BaseDocument()); + + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOverwriteModeUpdate(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc1 = new BaseDocument(UUID.randomUUID().toString()); + doc1.addAttribute("foo", "a"); + final DocumentCreateEntity meta1 = collection.insertDocument(doc1).get(); + + final BaseDocument doc2 = new BaseDocument(UUID.randomUUID().toString()); + doc2.addAttribute("foo", "a"); + final DocumentCreateEntity meta2 = collection.insertDocument(doc2).get(); + + doc1.addAttribute("bar", "b"); + doc2.addAttribute("bar", "b"); + + final MultiDocumentEntity> repsert = + collection.insertDocuments(Arrays.asList(doc1, doc2), + new DocumentCreateOptions().overwriteMode(OverwriteMode.update).returnNew(true), BaseDocument.class).get(); + assertThat(repsert).isNotNull(); + assertThat(repsert.getDocuments()).hasSize(2); + assertThat(repsert.getErrors()).isEmpty(); + for (final DocumentCreateEntity documentCreateEntity : repsert.getDocuments()) { + assertThat(documentCreateEntity.getRev()).isNotEqualTo(meta1.getRev()); + assertThat(documentCreateEntity.getRev()).isNotEqualTo(meta2.getRev()); + assertThat(documentCreateEntity.getNew().getAttribute("foo")).isEqualTo("a"); + assertThat(documentCreateEntity.getNew().getAttribute("bar")).isEqualTo("b"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsJson(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + 
values.add(RawJson.of("{}")); + values.add(RawJson.of("{}")); + values.add(RawJson.of("{}")); + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsRawData(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{},{},{}]"); + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsRawDataReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{\"aaa\":33},{\"aaa\":33},{\"aaa\":33}]"); + final MultiDocumentEntity> docs = + collection.insertDocuments(values, new DocumentCreateOptions().returnNew(true)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + + for (final DocumentCreateEntity doc : docs.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("aaa")).isTrue(); + assertThat(jn.get("aaa").intValue()).isEqualTo(33); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsOne(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection 
values = new ArrayList<>(); + values.add(new BaseDocument()); + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(1); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).isEmpty(); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); + final MultiDocumentEntity> docs = collection.insertDocuments(values, + options, BaseDocument.class).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + for (final DocumentCreateEntity doc : docs.getDocuments()) { + assertThat(doc.getNew()).isNotNull(); + final BaseDocument baseDocument = doc.getNew(); + assertThat(baseDocument.getKey()).isNotNull(); + } + + } + + @ParameterizedTest + @MethodSource("asyncCols") + void insertDocumentsFail(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + 
final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final MultiDocumentEntity docs = collection.insertDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(2); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).hasSize(1); + assertThat(docs.getErrors().iterator().next().getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = Arrays.asList(new BaseDocument(), new BaseDocument(), + new BaseDocument()); + + final DocumentImportEntity docs = collection.importDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(values.size()); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonList(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = Arrays.asList( + RawJson.of("{}"), + RawJson.of("{}"), + RawJson.of("{}") + ); + + final DocumentImportEntity docs = collection.importDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(values.size()); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDuplicateDefaultError(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 
= rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDuplicateError(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.error)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDuplicateIgnore(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.ignore)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isEqualTo(1); + assertThat(docs.getUpdated()).isZero(); + 
assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDuplicateReplace(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.replace)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDuplicateUpdate(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.update)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsCompleteFail(ArangoCollectionAsync collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + Throwable thrown = catchThrowable(() -> collection.importDocuments(values, + new 
DocumentImportOptions().complete(true)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsDetails(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, new DocumentImportOptions().details(true)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).hasSize(1); + assertThat(docs.getDetails().iterator().next()).contains("unique constraint violated"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsOverwriteFalse(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + collection.insertDocument(new BaseDocument()).get(); + Long initialCount = collection.count().get().getCount(); + + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + collection.importDocuments(values, new DocumentImportOptions().overwrite(false)).get(); + assertThat(collection.count().get().getCount()).isEqualTo(initialCount + 2L); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsOverwriteTrue(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + collection.insertDocument(new BaseDocument()).get(); + + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + 
collection.importDocuments(values, new DocumentImportOptions().overwrite(true)).get(); + assertThat(collection.count().get().getCount()).isEqualTo(2L); + } + + @ParameterizedTest + @MethodSource("edges") + void importDocumentsFromToPrefix(ArangoCollection edgeCollection) { + final Collection values = new ArrayList<>(); + final String[] keys = {rnd(), rnd()}; + for (String s : keys) { + values.add(new BaseEdgeDocument(s, "from", "to")); + } + assertThat(values).hasSize(keys.length); + + final DocumentImportEntity importResult = edgeCollection.importDocuments(values, + new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); + assertThat(importResult).isNotNull(); + assertThat(importResult.getCreated()).isEqualTo(values.size()); + for (String key : keys) { + final BaseEdgeDocument doc = edgeCollection.getDocument(key, BaseEdgeDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getFrom()).isEqualTo("foo/from"); + assertThat(doc.getTo()).isEqualTo("bar/to"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJson(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDuplicateDefaultError(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = 
mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDuplicateError(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.error)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDuplicateIgnore(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new 
DocumentImportOptions().onDuplicate(OnDuplicate.ignore)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isEqualTo(1); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDuplicateReplace(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.replace)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDuplicateUpdate(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.update)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + 
assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonCompleteFail(ArangoCollectionAsync collection) { + final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; + Throwable thrown = catchThrowable(() -> collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().complete(true)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonDetails(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().details(true)).get(); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).hasSize(1); + assertThat(docs.getDetails().iterator().next()).contains("unique constraint violated"); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonOverwriteFalse(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + collection.insertDocument(new BaseDocument()).get(); + Long initialCount = collection.count().get().getCount(); + + final String values = 
mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + collection.importDocuments(RawJson.of(values), new DocumentImportOptions().overwrite(false)).get(); + assertThat(collection.count().get().getCount()).isEqualTo(initialCount + 2L); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void importDocumentsJsonOverwriteTrue(ArangoCollectionAsync collection) throws JsonProcessingException, ExecutionException, InterruptedException { + collection.insertDocument(new BaseDocument()).get(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + collection.importDocuments(RawJson.of(values), new DocumentImportOptions().overwrite(true)).get(); + assertThat(collection.count().get().getCount()).isEqualTo(2L); + } + + @ParameterizedTest + @MethodSource("edges") + void importDocumentsJsonFromToPrefix(ArangoCollection edgeCollection) throws JsonProcessingException { + String k1 = UUID.randomUUID().toString(); + String k2 = UUID.randomUUID().toString(); + + final String[] keys = {k1, k2}; + + final String values = mapper.writeValueAsString(Arrays.asList(new MapBuilder().put("_key", k1).put("_from", + "from").put("_to", "to").get(), new MapBuilder().put("_key", k2).put("_from", "from").put("_to", "to").get())); + + final DocumentImportEntity importResult = edgeCollection.importDocuments(RawJson.of(values), + new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); + assertThat(importResult).isNotNull(); + assertThat(importResult.getCreated()).isEqualTo(2); + for (String key : keys) { + final BaseEdgeDocument doc = edgeCollection.getDocument(key, BaseEdgeDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getFrom()).isEqualTo("foo/from"); + assertThat(doc.getTo()).isEqualTo("bar/to"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
deleteDocumentsByKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + collection.insertDocuments(values).get(); + final Collection keys = new ArrayList<>(); + keys.add("1"); + keys.add("2"); + final MultiDocumentEntity> deleteResult = collection.deleteDocuments(keys).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsRawDataByKeyReturnOld(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{\"_key\":\"1\"},{\"_key\":\"2\"}]"); + collection.insertDocuments(values).get(); + final RawData keys = RawJson.of("[\"1\",\"2\"]"); + MultiDocumentEntity> deleteResult = collection.deleteDocuments(keys, + new DocumentDeleteOptions().returnOld(true)).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + assertThat(i.getOld()).isNotNull().isInstanceOf(RawJson.class); + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) i.getOld()).get()); + assertThat(jn.get("_key").asText()).isEqualTo(i.getKey()); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsByDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new 
ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + collection.insertDocuments(values).get(); + MultiDocumentEntity> deleteResult = collection.deleteDocuments(values).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsByKeyOne(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values).get(); + final Collection keys = new ArrayList<>(); + keys.add("1"); + final MultiDocumentEntity> deleteResult = collection.deleteDocuments(keys).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(1); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isEqualTo("1"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsByDocumentOne(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values).get(); + final MultiDocumentEntity> deleteResult = collection.deleteDocuments(values).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(1); + for (final 
DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isEqualTo("1"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + collection.insertDocuments(values).get(); + final Collection keys = new ArrayList<>(); + final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsByKeyNotExisting(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + collection.insertDocuments(values).get(); + final Collection keys = Arrays.asList(rnd(), rnd()); + + final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void deleteDocumentsByDocumentsNotExisting(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + final MultiDocumentEntity deleteResult = collection.deleteDocuments(values).get(); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
updateDocuments(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = Arrays.asList(new BaseDocument(rnd()), new BaseDocument(rnd())); + collection.insertDocuments(values).get(); + values.forEach(it -> it.addAttribute("a", "test")); + + final MultiDocumentEntity updateResult = collection.updateDocuments(values).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsWithDifferentReturnType(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + List keys = + IntStream.range(0, 3).mapToObj(it -> "key-" + UUID.randomUUID()).collect(Collectors.toList()); + List docs = + keys.stream().map(BaseDocument::new).peek(it -> it.addAttribute("a", "test")).collect(Collectors.toList()); + + collection.insertDocuments(docs).get(); + + List> modifiedDocs = docs.stream().peek(it -> it.addAttribute("b", "test")).map(it -> { + Map map = new HashMap<>(); + map.put("_key", it.getKey()); + map.put("a", it.getAttribute("a")); + map.put("b", it.getAttribute("b")); + return map; + }).collect(Collectors.toList()); + + final MultiDocumentEntity> updateResult = + collection.updateDocuments(modifiedDocs, new DocumentUpdateOptions().returnNew(true), BaseDocument.class).get(); + assertThat(updateResult.getDocuments()).hasSize(3); + assertThat(updateResult.getErrors()).isEmpty(); + assertThat(updateResult.getDocuments().stream()).map(DocumentUpdateEntity::getNew).allMatch(it -> it.getAttribute("a").equals("test")).allMatch(it -> it.getAttribute("b").equals("test")); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsOne(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + 
values.add(e); + } + collection.insertDocuments(values).get(); + final Collection updatedValues = new ArrayList<>(); + final BaseDocument first = values.iterator().next(); + first.addAttribute("a", "test"); + updatedValues.add(first); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity updateResult = collection.updateDocuments(values).get(); + assertThat(updateResult.getDocuments()).isEmpty(); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsWithoutKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + } + collection.insertDocuments(values).get(); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + updatedValues.add(new BaseDocument()); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsJson(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + values.add(RawJson.of("{\"_key\":\"1\"}")); + values.add(RawJson.of("{\"_key\":\"2\"}")); + collection.insertDocuments(values).get(); + + final Collection updatedValues = new ArrayList<>(); + updatedValues.add(RawJson.of("{\"_key\":\"1\", 
\"foo\":\"bar\"}")); + updatedValues.add(RawJson.of("{\"_key\":\"2\", \"foo\":\"bar\"}")); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsRawData(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values).get(); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void updateDocumentsRawDataReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values).get(); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + MultiDocumentEntity> updateResult = + collection.updateDocuments(updatedValues, new DocumentUpdateOptions().returnNew(true)).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + for (DocumentUpdateEntity doc : updateResult.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("foo")).isTrue(); + assertThat(jn.get("foo").textValue()).isEqualTo("bar"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocuments(ArangoCollectionAsync 
collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + } + collection.insertDocuments(values).get(); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsOne(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values).get(); + final Collection updatedValues = new ArrayList<>(); + final BaseDocument first = values.iterator().next(); + first.addAttribute("a", "test"); + updatedValues.add(first); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity updateResult = collection.updateDocuments(values).get(); + assertThat(updateResult.getDocuments()).isEmpty(); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsWithoutKey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + } + 
collection.insertDocuments(values).get(); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + updatedValues.add(new BaseDocument()); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsJson(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final Collection values = new ArrayList<>(); + values.add(RawJson.of("{\"_key\":\"1\"}")); + values.add(RawJson.of("{\"_key\":\"2\"}")); + collection.insertDocuments(values).get(); + + final Collection updatedValues = new ArrayList<>(); + updatedValues.add(RawJson.of("{\"_key\":\"1\", \"foo\":\"bar\"}")); + updatedValues.add(RawJson.of("{\"_key\":\"2\", \"foo\":\"bar\"}")); + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsRawData(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values).get(); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void replaceDocumentsRawDataReturnNew(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final RawData values = 
RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values).get(); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + MultiDocumentEntity> updateResult = + collection.replaceDocuments(updatedValues, new DocumentReplaceOptions().returnNew(true)).get(); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + for (DocumentUpdateEntity doc : updateResult.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("foo")).isTrue(); + assertThat(jn.get("foo").textValue()).isEqualTo("bar"); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getInfo(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final CollectionEntity result = collection.getInfo().get(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getPropeties(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final CollectionPropertiesEntity result = collection.getProperties().get(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + assertThat(result.getCount()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void changeProperties(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final CollectionPropertiesEntity properties = collection.getProperties().get(); + assertThat(properties.getWaitForSync()).isNotNull(); + if (isAtLeastVersion(3, 7)) { + assertThat(properties.getSchema()).isNull(); + } + + String schemaRule = ("{ " + " \"properties\": {" + " \"number\": {" + " " + + " \"type\": \"number\"" + " }" + " }" + " }").replaceAll("\\s", ""); + String schemaMessage = "The document has problems!"; + + 
CollectionPropertiesOptions updatedOptions = + new CollectionPropertiesOptions().waitForSync(!properties.getWaitForSync()).schema(new CollectionSchema().setLevel(CollectionSchema.Level.NEW).setMessage(schemaMessage).setRule(schemaRule)); + + final CollectionPropertiesEntity changedProperties = collection.changeProperties(updatedOptions).get(); + assertThat(changedProperties.getWaitForSync()).isNotNull(); + assertThat(changedProperties.getWaitForSync()).isEqualTo(!properties.getWaitForSync()); + if (isAtLeastVersion(3, 7)) { + assertThat(changedProperties.getSchema()).isNotNull(); + assertThat(changedProperties.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(changedProperties.getSchema().getMessage()).isEqualTo(schemaMessage); + assertThat(changedProperties.getSchema().getRule()).isEqualTo(schemaRule); + } + + // revert changes + CollectionPropertiesEntity revertedProperties = collection.changeProperties(new CollectionPropertiesOptions() + .waitForSync(properties.getWaitForSync()).schema(new CollectionSchema())).get(); + if (isAtLeastVersion(3, 7)) { + assertThat(revertedProperties.getSchema()).isNull(); + } + + } + + @ParameterizedTest + @MethodSource("asyncCols") + void rename(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + ArangoDatabaseAsync db = collection.db(); + + if (!db.collection("c1").exists().get()) { + db.collection("c1").create().get(); + } + + if (db.collection("c2").exists().get()) { + db.collection("c2").drop().get(); + } + + final CollectionEntity result = db.collection("c1").rename("c2").get(); + assertThat(result).isNotNull(); + assertThat(result.getName()).isEqualTo("c2"); + + final CollectionEntity info = db.collection("c2").getInfo().get(); + assertThat(info.getName()).isEqualTo("c2"); + + Throwable thrown = catchThrowable(() -> db.collection("c1").getInfo().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + 
ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void responsibleShard(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 5)); + ShardEntity shard = collection.getResponsibleShard(new BaseDocument("testKey")).get(); + assertThat(shard).isNotNull(); + assertThat(shard.getShardId()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getRevision(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final CollectionRevisionEntity result = collection.getRevision().get(); + assertThat(result).isNotNull(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + assertThat(result.getRevision()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void keyWithSpecialCharacter(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final String key = "myKey_-:.@()+,=;$!*'%-" + UUID.randomUUID(); + collection.insertDocument(new BaseDocument(key)).get(); + final BaseDocument doc = collection.getDocument(key, BaseDocument.class).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void alreadyUrlEncodedkey(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + final String key = "http%3A%2F%2Fexample.com%2F-" + UUID.randomUUID(); + collection.insertDocument(new BaseDocument(key)).get(); + final BaseDocument doc = collection.getDocument(key, BaseDocument.class).get(); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void grantAccessRW(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = collection.db().arango(); + try { 
+ arangoDB.createUser("user1", "1234", null).get(); + collection.grantAccess("user1", Permissions.RW).get(); + } finally { + arangoDB.deleteUser("user1").get(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void grantAccessRO(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null).get(); + collection.grantAccess("user1", Permissions.RO).get(); + } finally { + arangoDB.deleteUser("user1").get(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void grantAccessNONE(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null).get(); + collection.grantAccess("user1", Permissions.NONE).get(); + } finally { + arangoDB.deleteUser("user1").get(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void grantAccessUserNotFound(ArangoCollectionAsync collection) { + Throwable thrown = catchThrowable(() -> collection.grantAccess("user1", Permissions.RW).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void revokeAccess(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null).get(); + collection.grantAccess("user1", Permissions.NONE).get(); + } finally { + arangoDB.deleteUser("user1").get(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void revokeAccessUserNotFound(ArangoCollectionAsync collection) { + Throwable thrown = catchThrowable(() -> collection.grantAccess("user1", Permissions.NONE).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void 
resetAccess(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null).get(); + collection.resetAccess("user1").get(); + } finally { + arangoDB.deleteUser("user1").get(); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void resetAccessUserNotFound(ArangoCollectionAsync collection) { + Throwable thrown = catchThrowable(() -> collection.resetAccess("user1").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getPermissions(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assertThat(collection.getPermissions("root").get()).isEqualTo(Permissions.RW); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void annotationsInParamsAndMethods(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(collection.getSerde().getUserSerde() instanceof JacksonSerde, "JacksonSerde only"); + AnnotatedEntity entity = new AnnotatedEntity(UUID.randomUUID().toString()); + AnnotatedEntity doc = collection.insertDocument(entity, new DocumentCreateOptions().returnNew(true)).get().getNew(); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(entity.getKey()); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + } + + @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "type") + public interface Animal { + String getKey(); + + String getName(); + } + + public static class Dog implements Animal { + + @Key + private String key; + private String name; + + public Dog() { + } + + public Dog(String key, String name) { + this.key = key; + this.name = name; + } + + @Override + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @Override + public String getName() { + return name; + } + + 
public void setName(String name) { + this.name = name; + } + } + + public static class Cat implements Animal { + @Key + private String key; + private String name; + + public Cat() { + } + + public Cat(String key, String name) { + this.key = key; + this.name = name; + } + + @Override + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class TestUpdateEntity { + private String a, b; + + public String getA() { + return a; + } + + public String getB() { + return b; + } + } + + public static class TestUpdateEntitySerializeNullFalse { + private String a, b; + + @JsonInclude(JsonInclude.Include.NON_NULL) + public String getA() { + return a; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public String getB() { + return b; + } + } + + public static class AnnotatedEntity { + + private final String key; + private String id; + private String rev; + + public AnnotatedEntity(@Key String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public String getId() { + return id; + } + + @Id + public void setId(String id) { + this.id = id; + } + + public String getRev() { + return rev; + } + + @Rev + public void setRev(String rev) { + this.rev = rev; + } + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java b/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java new file mode 100644 index 000000000..aa28ef9c1 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java @@ -0,0 +1,3799 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; +import com.arangodb.model.DocumentImportOptions.OnDuplicate; +import com.arangodb.serde.jackson.Id; +import com.arangodb.serde.jackson.JacksonSerde; +import com.arangodb.serde.jackson.Key; +import com.arangodb.serde.jackson.Rev; +import com.arangodb.util.*; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoCollectionTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "ArangoCollectionTest_collection"; + private static final String EDGE_COLLECTION_NAME = 
"ArangoCollectionTest_edge_collection"; + + private final ObjectMapper mapper = new ObjectMapper(); + + private static Stream cols() { + return dbsStream() + .map(mapNamedPayload(db -> db.collection(COLLECTION_NAME))) + .map(Arguments::of); + } + + private static Stream edges() { + return dbsStream() + .map(mapNamedPayload(db -> db.collection(EDGE_COLLECTION_NAME))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + initEdgeCollections(EDGE_COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocument(ArangoCollection collection) { + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), null); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + assertThat(doc.getId()).isEqualTo(COLLECTION_NAME + "/" + doc.getKey()); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentWithArrayWithNullValues(ArangoCollection collection) { + List arr = Arrays.asList("a", null); + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("arr", arr); + + final DocumentCreateEntity insertedDoc = collection.insertDocument(doc, + new DocumentCreateOptions().returnNew(true)); + assertThat(insertedDoc).isNotNull(); + assertThat(insertedDoc.getId()).isNotNull(); + assertThat(insertedDoc.getKey()).isNotNull(); + assertThat(insertedDoc.getRev()).isNotNull(); + assertThat(insertedDoc.getId()).isEqualTo(COLLECTION_NAME + "/" + insertedDoc.getKey()); + //noinspection unchecked + assertThat((List) insertedDoc.getNew().getAttribute("arr")).containsAll(Arrays.asList("a", null)); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentWithNullValues(ArangoCollection collection) { + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("null", null); + + final 
DocumentCreateEntity insertedDoc = collection.insertDocument(doc, + new DocumentCreateOptions().returnNew(true)); + assertThat(insertedDoc).isNotNull(); + assertThat(insertedDoc.getId()).isNotNull(); + assertThat(insertedDoc.getKey()).isNotNull(); + assertThat(insertedDoc.getRev()).isNotNull(); + assertThat(insertedDoc.getId()).isEqualTo(COLLECTION_NAME + "/" + insertedDoc.getKey()); + assertThat(insertedDoc.getNew().getProperties()).containsKey("null"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentUpdateRev(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentReturnNew(ArangoCollection collection) { + final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentWithTypeOverwriteModeReplace(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + assumeTrue(collection.getSerde().getUserSerde() instanceof JacksonSerde, "polymorphic deserialization support" + + " required"); + + String key = UUID.randomUUID().toString(); + Dog dog = new Dog(key, "Teddy"); + Cat cat = new Cat(key, "Luna"); + + final DocumentCreateOptions options = new DocumentCreateOptions() + .returnNew(true) + .returnOld(true) + .overwriteMode(OverwriteMode.replace); + collection.insertDocument(dog, options); + final DocumentCreateEntity doc = collection.insertDocument(cat, options, Animal.class); + 
assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull().isEqualTo(key); + assertThat(doc.getRev()).isNotNull(); + + assertThat(doc.getOld()) + .isNotNull() + .isInstanceOf(Dog.class); + assertThat(doc.getOld().getKey()).isEqualTo(key); + assertThat(doc.getOld().getName()).isEqualTo("Teddy"); + + assertThat(doc.getNew()) + .isNotNull() + .isInstanceOf(Cat.class); + assertThat(doc.getNew().getKey()).isEqualTo(key); + assertThat(doc.getNew().getName()).isEqualTo("Luna"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeIgnore(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc); + + final BaseDocument doc2 = new BaseDocument(key); + doc2.addAttribute("bar", "b"); + final DocumentCreateEntity insertIgnore = collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.ignore)); + + assertThat(insertIgnore).isNotNull(); + assertThat(insertIgnore.getRev()).isEqualTo(meta.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeConflict(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + collection.insertDocument(doc); + + final BaseDocument doc2 = new BaseDocument(key); + Throwable thrown = catchThrowable(() -> collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.conflict))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("cols") + 
void insertDocumentOverwriteModeReplace(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc); + + final BaseDocument doc2 = new BaseDocument(key); + doc2.addAttribute("bar", "b"); + final DocumentCreateEntity repsert = collection.insertDocument(doc2, + new DocumentCreateOptions().overwriteMode(OverwriteMode.replace).returnNew(true)); + + assertThat(repsert).isNotNull(); + assertThat(repsert.getRev()).isNotEqualTo(meta.getRev()); + assertThat(repsert.getNew().getProperties().containsKey("foo")).isFalse(); + assertThat(repsert.getNew().getAttribute("bar")).isEqualTo("b"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdate(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity meta = collection.insertDocument(doc); + + doc.addAttribute("bar", "b"); + final DocumentCreateEntity updated = collection.insertDocument(doc, + new DocumentCreateOptions().overwriteMode(OverwriteMode.update).returnNew(true)); + + assertThat(updated).isNotNull(); + assertThat(updated.getRev()).isNotEqualTo(meta.getRev()); + assertThat(updated.getNew().getAttribute("foo")).isEqualTo("a"); + assertThat(updated.getNew().getAttribute("bar")).isEqualTo("b"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdateMergeObjectsFalse(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + Map fieldA = Collections.singletonMap("a", "a"); + doc.addAttribute("foo", fieldA); + final DocumentCreateEntity meta = collection.insertDocument(doc); + + Map fieldB = Collections.singletonMap("b", 
"b"); + doc.addAttribute("foo", fieldB); + final DocumentCreateEntity updated = collection.insertDocument(doc, + new DocumentCreateOptions().overwriteMode(OverwriteMode.update).mergeObjects(false).returnNew(true)); + + assertThat(updated).isNotNull(); + assertThat(updated.getRev()).isNotEqualTo(meta.getRev()); + assertThat(updated.getNew().getAttribute("foo")).isEqualTo(fieldB); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdateKeepNullTrue(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "bar"); + collection.insertDocument(doc); + + doc.updateAttribute("foo", null); + final BaseDocument updated = collection.insertDocument(doc, new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .keepNull(true) + .returnNew(true)).getNew(); + + assertThat(updated.getProperties()).containsEntry("foo", null); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdateKeepNullFalse(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("foo", "bar"); + collection.insertDocument(doc); + + doc.updateAttribute("foo", null); + final BaseDocument updated = collection.insertDocument(doc, new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .keepNull(false) + .returnNew(true)).getNew(); + + assertThat(updated.getProperties()).doesNotContainKey("foo"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdateWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 2); + DocumentCreateEntity updateResult = 
collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true) + ); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeUpdateWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 0); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true) + ); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOverwriteModeUpdateWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOverwriteModeUpdateWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + 
BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.update) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeReplaceWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 2); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true) + ); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentOverwriteModeReplaceUpdateWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 0); + DocumentCreateEntity updateResult = collection.insertDocument( + doc, + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true) + ); + 
assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOverwriteModeReplaceWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOverwriteModeReplaceWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.insertDocuments( + Arrays.asList(d1, d2), + new DocumentCreateOptions() + .overwriteMode(OverwriteMode.replace) + .versionAttribute("_version") + .returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void 
insertDocumentWaitForSync(ArangoCollection collection) { + final DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentRefillIndexCaches(ArangoCollection collection) { + final DocumentCreateOptions options = new DocumentCreateOptions().refillIndexCaches(true); + final DocumentCreateEntity doc = collection.insertDocument(new BaseDocument(), options); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getKey()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + assertThat(doc.getNew()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentAsJson(ArangoCollection collection) { + String key = "doc-" + UUID.randomUUID(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\",\"a\":\"test\"}"); + final DocumentCreateEntity doc = collection.insertDocument(rawJson); + assertThat(doc).isNotNull(); + assertThat(doc.getId()).isEqualTo(collection.name() + "/" + key); + assertThat(doc.getKey()).isEqualTo(key); + assertThat(doc.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentAsBytes(ArangoCollection collection) { + String key = "doc-" + UUID.randomUUID(); + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("a", "test"); + byte[] bytes = collection.getSerde().serializeUserData(doc); + RawBytes rawJson = RawBytes.of(bytes); + final DocumentCreateEntity createEntity = collection.insertDocument(rawJson, + new DocumentCreateOptions().returnNew(true)); + assertThat(createEntity).isNotNull(); + assertThat(createEntity.getId()).isEqualTo(collection.name() + "/" + key); + 
assertThat(createEntity.getKey()).isEqualTo(key); + assertThat(createEntity.getRev()).isNotNull(); + assertThat(createEntity.getNew()).isNotNull().isInstanceOf(RawBytes.class); + Map newDoc = collection.getSerde().getUserSerde().deserialize(createEntity.getNew().get(), Map.class); + assertThat(newDoc).containsAllEntriesOf(doc); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity meta = collection.insertDocument(new BaseDocument(), + new DocumentCreateOptions().silent(true)); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentSilentDontTouchInstance(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final String key = "testkey-" + UUID.randomUUID(); + doc.setKey(key); + final DocumentCreateEntity meta = collection.insertDocument(doc, + new DocumentCreateOptions().silent(true)); + assertThat(meta).isNotNull(); + assertThat(meta.getKey()).isNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final MultiDocumentEntity> info = + collection.insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().silent(true), BaseDocument.class); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsWithErrors(ArangoCollection collection) { + // BTS-615 + assumeTrue(isAtLeastVersion(3, 11)); + + final MultiDocumentEntity> res = + 
collection.insertDocuments(Arrays.asList( + new BaseDocument(), + new BaseDocument("<>"), + new BaseDocument() + ), + new DocumentCreateOptions(), BaseDocument.class); + assertThat(res).isNotNull(); + assertThat(res.getDocuments()).hasSize(2); + assertThat(res.getErrors()).hasSize(1); + assertThat(res.getDocumentsAndErrors()).hasSize(3); + assertThat(res.getDocumentsAndErrors().get(0)).isSameAs(res.getDocuments().get(0)); + assertThat(res.getDocumentsAndErrors().get(1)).isSameAs(res.getErrors().get(0)); + assertThat(res.getDocumentsAndErrors().get(2)).isSameAs(res.getDocuments().get(1)); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsRefillIndexCaches(ArangoCollection collection) { + final MultiDocumentEntity> info = + collection.insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().refillIndexCaches(true), BaseDocument.class); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocument(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null); + assertThat(createResult.getKey()).isNotNull(); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentIfMatch(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifMatch(createResult.getRev()); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, options); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + 
assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentIfMatchFail(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifMatch("no"); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, options); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentIfNoneMatch(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch("no"); + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, options); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(COLLECTION_NAME + "/" + createResult.getKey()); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentIfNoneMatchFail(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument(), null); + assertThat(createResult.getKey()).isNotNull(); + final DocumentReadOptions options = new DocumentReadOptions().ifNoneMatch(createResult.getRev()); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, options); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentAsJson(ArangoCollection collection) { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\",\"a\":\"test\"}"); + collection.insertDocument(rawJson); + final RawJson readResult = collection.getDocument(key, RawJson.class); + 
assertThat(readResult.get()).contains("\"_key\":\"" + key + "\"").contains("\"_id\":\"" + COLLECTION_NAME + "/" + key + "\""); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentNotFound(ArangoCollection collection) { + final BaseDocument document = collection.getDocument("no", BaseDocument.class); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentNotFoundOptionsDefault(ArangoCollection collection) { + final BaseDocument document = collection.getDocument("no", BaseDocument.class, new DocumentReadOptions()); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentNotFoundOptionsNull(ArangoCollection collection) { + final BaseDocument document = collection.getDocument("no", BaseDocument.class, null); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentWrongKey(ArangoCollection collection) { + Throwable thrown = catchThrowable(() -> collection.getDocument("no/no", BaseDocument.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @SlowTest + @ParameterizedTest + @MethodSource("cols") + void getDocumentDirtyRead(ArangoCollection collection) throws InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + collection.insertDocument(doc, new DocumentCreateOptions()); + Thread.sleep(2000); + final RawJson document = collection.getDocument(doc.getKey(), RawJson.class, + new DocumentReadOptions().allowDirtyRead(true)); + assertThat(document).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocuments(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + values.add(new BaseDocument("3")); + collection.insertDocuments(values); + final MultiDocumentEntity documents = collection.getDocuments(Arrays.asList("1", "2", "3"), + 
BaseDocument.class); + assertThat(documents).isNotNull(); + assertThat(documents.getDocuments()).hasSize(3); + for (final BaseDocument document : documents.getDocuments()) { + assertThat(document.getId()).isIn(COLLECTION_NAME + "/" + "1", COLLECTION_NAME + "/" + "2", + COLLECTION_NAME + "/" + "3"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentsUserData(ArangoCollection collection) { + Cat a = new Cat(); + a.setKey(UUID.randomUUID().toString()); + a.setName("a"); + + Cat b = new Cat(); + b.setKey(UUID.randomUUID().toString()); + b.setName("b"); + + final List values = Arrays.asList(a, b); + collection.insertDocuments(values); + final MultiDocumentEntity documents = collection.getDocuments(Arrays.asList(a.getKey(), b.getKey()), + Cat.class); + assertThat(documents).isNotNull(); + assertThat(documents.getDocuments()) + .hasSize(2) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(a.getKey()); + assertThat(d.getName()).isEqualTo(a.getName()); + }) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(b.getKey()); + assertThat(d.getName()).isEqualTo(b.getName()); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentsWithCustomShardingKey(ArangoCollection c) { + ArangoCollection collection = c.db().collection("customShardingKeyCollection"); + if (collection.exists()) collection.drop(); + + collection.create(new CollectionCreateOptions().shardKeys("customField").numberOfShards(10)); + + List values = + IntStream.range(0, 10).mapToObj(String::valueOf).map(key -> new BaseDocument()).peek(it -> it.addAttribute( + "customField", rnd())).collect(Collectors.toList()); + + MultiDocumentEntity> inserted = collection.insertDocuments(values); + List insertedKeys = + inserted.getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + final Collection documents = + collection.getDocuments(insertedKeys, BaseDocument.class).getDocuments(); + + assertThat(documents).hasSize(10); + } + + 
@ParameterizedTest + @MethodSource("cols") + void getDocumentsDirtyRead(ArangoCollection collection) { + assumeTrue(isCluster()); // skip activefailover + final Collection values = new ArrayList<>(); + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + values.add(new BaseDocument("3")); + collection.insertDocuments(values); + final MultiDocumentEntity documents = collection.getDocuments(Arrays.asList("1", "2", "3"), + BaseDocument.class, new DocumentReadOptions().allowDirtyRead(true)); + assertThat(documents).isNotNull(); + if (isAtLeastVersion(3, 10)) { + assertThat(documents.isPotentialDirtyRead()).isTrue(); + } + assertThat(documents.getDocuments()).hasSize(3); + for (final BaseDocument document : documents.getDocuments()) { + assertThat(document.getId()).isIn(COLLECTION_NAME + "/" + "1", COLLECTION_NAME + "/" + "2", + COLLECTION_NAME + "/" + "3"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentsNotFound(ArangoCollection collection) { + final MultiDocumentEntity readResult = collection.getDocuments(Collections.singleton("no"), + BaseDocument.class); + assertThat(readResult).isNotNull(); + assertThat(readResult.getDocuments()).isEmpty(); + assertThat(readResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("cols") + void getDocumentsWrongKey(ArangoCollection collection) { + final MultiDocumentEntity readResult = collection.getDocuments(Collections.singleton("no/no"), + BaseDocument.class); + assertThat(readResult).isNotNull(); + assertThat(readResult.getDocuments()).isEmpty(); + assertThat(readResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocument(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + 
doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + null); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getNew()).isNull(); + assertThat(updateResult.getOld()).isNull(); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentWithDifferentReturnType(ArangoCollection collection) { + final String key = "key-" + UUID.randomUUID(); + final BaseDocument doc = new BaseDocument(key); + doc.addAttribute("a", "test"); + collection.insertDocument(doc); + + final DocumentUpdateEntity updateResult = collection.updateDocument(key, + Collections.singletonMap("b", "test"), new DocumentUpdateOptions().returnNew(true), BaseDocument.class); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(key); + BaseDocument updated = updateResult.getNew(); + assertThat(updated).isNotNull(); + assertThat(updated.getAttribute("a")).isEqualTo("test"); + assertThat(updated.getAttribute("b")).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentUpdateRev(ArangoCollection collection) { + final 
BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.addAttribute("foo", "bar"); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + null); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentIfMatch(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch(createResult.getRev()); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + 
@MethodSource("cols") + void updateDocumentIfMatchFail(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + + final DocumentUpdateOptions options = new DocumentUpdateOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.updateDocument(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 2); + DocumentUpdateEntity updateResult = collection.updateDocument( + doc.getKey(), + doc, + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true) + ); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 0); + DocumentUpdateEntity updateResult = collection.updateDocument( + doc.getKey(), + doc, + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true) + ); + assertThat(updateResult.getNew().getAttribute("_version")).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 
12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> updateResult = collection.updateDocuments( + Arrays.asList(d1, d2), + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> updateResult = collection.updateDocuments( + Arrays.asList(d1, d2), + new DocumentUpdateOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ); + + assertThat(updateResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentReturnNew(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + final DocumentUpdateOptions options = new 
DocumentUpdateOptions().returnNew(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(updateResult.getNew()).isNotNull(); + assertThat(updateResult.getNew().getKey()).isEqualTo(createResult.getKey()); + assertThat(updateResult.getNew().getRevision()).isNotEqualTo(createResult.getRev()); + assertThat(updateResult.getNew().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(updateResult.getNew().getAttribute("a"))).isEqualTo("test1"); + assertThat(updateResult.getNew().getAttribute("b")).isNotNull(); + assertThat(String.valueOf(updateResult.getNew().getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentReturnOld(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + final DocumentUpdateOptions options = new DocumentUpdateOptions().returnOld(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(updateResult.getOld()).isNotNull(); + assertThat(updateResult.getOld().getKey()).isEqualTo(createResult.getKey()); + assertThat(updateResult.getOld().getRevision()).isEqualTo(createResult.getRev()); + assertThat(updateResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(updateResult.getOld().getAttribute("a"))).isEqualTo("test"); + 
assertThat(updateResult.getOld().getProperties().keySet()).doesNotContain("b"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentKeepNullTrue(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", null); + final DocumentUpdateOptions options = new DocumentUpdateOptions().keepNull(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentKeepNullFalse(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", null); + final DocumentUpdateOptions options = new DocumentUpdateOptions().keepNull(false); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), 
BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentSerializeNullTrue(ArangoCollection collection) { + final TestUpdateEntity doc = new TestUpdateEntity(); + doc.a = "foo"; + doc.b = "foo"; + final DocumentCreateEntity createResult = collection.insertDocument(doc); + final TestUpdateEntity patchDoc = new TestUpdateEntity(); + patchDoc.a = "bar"; + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), patchDoc); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(createResult.getKey()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties()).containsKey("a"); + assertThat(readResult.getAttribute("a")).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentSerializeNullFalse(ArangoCollection collection) { + final TestUpdateEntitySerializeNullFalse doc = new TestUpdateEntitySerializeNullFalse(); + doc.a = "foo"; + doc.b = "foo"; + final DocumentCreateEntity createResult = collection.insertDocument(doc); + final TestUpdateEntitySerializeNullFalse patchDoc = new TestUpdateEntitySerializeNullFalse(); + patchDoc.a = "bar"; + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), patchDoc); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getKey()).isEqualTo(createResult.getKey()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + 
assertThat(readResult.getProperties()).containsKeys("a", "b"); + assertThat(readResult.getAttribute("a")).isEqualTo("bar"); + assertThat(readResult.getAttribute("b")).isEqualTo("foo"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentMergeObjectsTrue(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final Map a = new HashMap<>(); + a.put("a", "test"); + doc.addAttribute("a", a); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + a.clear(); + a.put("b", "test"); + doc.updateAttribute("a", a); + final DocumentUpdateOptions options = new DocumentUpdateOptions().mergeObjects(true); + final DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + final Object aResult = readResult.getAttribute("a"); + assertThat(aResult).isInstanceOf(Map.class); + final Map aMap = (Map) aResult; + assertThat(aMap).containsKeys("a", "b"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentMergeObjectsFalse(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final Map a = new HashMap<>(); + a.put("a", "test"); + doc.addAttribute("a", a); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + a.clear(); + a.put("b", "test"); + doc.updateAttribute("a", a); + final DocumentUpdateOptions options = new DocumentUpdateOptions().mergeObjects(false); + final DocumentUpdateEntity updateResult = 
collection.updateDocument(createResult.getKey(), doc, + options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + final Object aResult = readResult.getAttribute("a"); + assertThat(aResult).isInstanceOf(Map.class); + final Map aMap = (Map) aResult; + assertThat(aMap.keySet()).doesNotContain("a"); + assertThat(aMap).containsKey("b"); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentIgnoreRevsFalse(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.updateAttribute("a", "test1"); + doc.setRevision("no"); + + final DocumentUpdateOptions options = new DocumentUpdateOptions().ignoreRevs(false); + Throwable thrown = catchThrowable(() -> collection.updateDocument(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final DocumentUpdateEntity meta = collection.updateDocument(createResult.getKey(), + new BaseDocument(), new DocumentUpdateOptions().silent(true)); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsSilent(ArangoCollection collection) { + 
assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = + collection.updateDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentUpdateOptions().silent(true), BaseDocument.class); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateNonExistingDocument(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument("test-" + rnd()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + + Throwable thrown = catchThrowable(() -> collection.updateDocument(doc.getKey(), doc, null)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1202); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentPreconditionFailed(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument("test-" + rnd()); + doc.addAttribute("foo", "a"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + + doc.updateAttribute("foo", "b"); + collection.updateDocument(doc.getKey(), doc, null); + + doc.updateAttribute("foo", "c"); + Throwable thrown = catchThrowable(() -> collection.updateDocument(doc.getKey(), doc, + new DocumentUpdateOptions().ifMatch(createResult.getRev()))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + BaseDocument readDocument = collection.getDocument(doc.getKey(), BaseDocument.class); + assertThat(readDocument.getAttribute("foo")).isEqualTo("b"); + } + + 
@ParameterizedTest + @MethodSource("cols") + void updateDocumentRefillIndexCaches(ArangoCollection collection) { + BaseDocument doc = new BaseDocument(); + DocumentCreateEntity createResult = collection.insertDocument(doc); + doc.addAttribute("foo", "bar"); + DocumentUpdateEntity updateResult = collection.updateDocument(createResult.getKey(), + doc, new DocumentUpdateOptions().refillIndexCaches(true)); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsRefillIndexCaches(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = + collection.updateDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentUpdateOptions().refillIndexCaches(true), BaseDocument.class); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocument(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, null); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getNew()).isNull(); + assertThat(replaceResult.getOld()).isNull(); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + 
assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentUpdateRev(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, null); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentIfMatch(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentReplaceOptions options = new DocumentReplaceOptions().ifMatch(createResult.getRev()); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + 
assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentIfMatchFail(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + + final DocumentReplaceOptions options = new DocumentReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.replaceDocument(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentIgnoreRevsFalse(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + doc.setRevision("no"); + + final DocumentReplaceOptions options = new DocumentReplaceOptions().ignoreRevs(false); + Throwable thrown = catchThrowable(() -> collection.replaceDocument(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 2); + DocumentUpdateEntity replaceResult = collection.replaceDocument( + doc.getKey(), + doc, + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true) + ); + 
assertThat(replaceResult.getNew().getAttribute("_version")).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("_version", 1); + collection.insertDocument(doc); + doc.addAttribute("_version", 0); + DocumentUpdateEntity replaceResult = collection.replaceDocument( + doc.getKey(), + doc, + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true) + ); + assertThat(replaceResult.getNew().getAttribute("_version")).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsWithExternalVersioning(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 2); + d2.addAttribute("_version", 2); + MultiDocumentEntity> replaceResult = collection.replaceDocuments( + Arrays.asList(d1, d2), + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ); + + assertThat(replaceResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(2); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsWithExternalVersioningFail(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + + BaseDocument d1 = new BaseDocument(UUID.randomUUID().toString()); + d1.addAttribute("_version", 1); + BaseDocument d2 = new BaseDocument(UUID.randomUUID().toString()); + d2.addAttribute("_version", 1); + + collection.insertDocuments(Arrays.asList(d1, d2)); + + d1.addAttribute("_version", 
0); + d2.addAttribute("_version", 0); + MultiDocumentEntity> replaceResult = collection.replaceDocuments( + Arrays.asList(d1, d2), + new DocumentReplaceOptions().versionAttribute("_version").returnNew(true), + BaseDocument.class + ); + + assertThat(replaceResult.getDocuments()).allSatisfy(it -> { + assertThat(it.getNew()).isNotNull(); + assertThat(it.getNew().getAttribute("_version")).isEqualTo(1); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentReturnNew(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final DocumentReplaceOptions options = new DocumentReplaceOptions().returnNew(true); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getNew()).isNotNull(); + assertThat(replaceResult.getNew().getKey()).isEqualTo(createResult.getKey()); + assertThat(replaceResult.getNew().getRevision()).isNotEqualTo(createResult.getRev()); + assertThat(replaceResult.getNew().getProperties().keySet()).doesNotContain("a"); + assertThat(replaceResult.getNew().getAttribute("b")).isNotNull(); + assertThat(String.valueOf(replaceResult.getNew().getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentReturnOld(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final 
DocumentReplaceOptions options = new DocumentReplaceOptions().returnOld(true); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), + doc, options); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getOld()).isNotNull(); + assertThat(replaceResult.getOld().getKey()).isEqualTo(createResult.getKey()); + assertThat(replaceResult.getOld().getRevision()).isEqualTo(createResult.getRev()); + assertThat(replaceResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(replaceResult.getOld().getAttribute("a"))).isEqualTo("test"); + assertThat(replaceResult.getOld().getProperties().keySet()).doesNotContain("b"); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final DocumentUpdateEntity meta = collection.replaceDocument(createResult.getKey(), + new BaseDocument(), new DocumentReplaceOptions().silent(true)); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentSilentDontTouchInstance(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc); + final DocumentUpdateEntity meta = collection.replaceDocument(createResult.getKey(), doc, + new DocumentReplaceOptions().silent(true)); + assertThat(meta.getRev()).isNull(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void 
replaceDocumentsSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = + collection.replaceDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentReplaceOptions().silent(true), BaseDocument.class); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentRefillIndexCaches(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc); + final DocumentUpdateEntity replaceResult = collection.replaceDocument(createResult.getKey(), doc, + new DocumentReplaceOptions().refillIndexCaches(true)); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsRefillIndexCaches(ArangoCollection collection) { + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = + collection.replaceDocuments(Collections.singletonList(new BaseDocument(createResult.getKey())), + new DocumentReplaceOptions().refillIndexCaches(true), BaseDocument.class); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocument(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + collection.deleteDocument(createResult.getKey()); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(document).isNull(); + } + + 
@ParameterizedTest + @MethodSource("cols") + void deleteDocumentReturnOld(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + final DocumentDeleteOptions options = new DocumentDeleteOptions().returnOld(true); + final DocumentDeleteEntity deleteResult = collection.deleteDocument(createResult.getKey(), + options, BaseDocument.class); + assertThat(deleteResult.getOld()).isNotNull(); + assertThat(deleteResult.getOld()).isInstanceOf(BaseDocument.class); + assertThat(deleteResult.getOld().getAttribute("a")).isNotNull(); + assertThat(String.valueOf(deleteResult.getOld().getAttribute("a"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentIfMatch(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc, null); + final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch(createResult.getRev()); + collection.deleteDocument(createResult.getKey(), options); + final BaseDocument document = collection.getDocument(createResult.getKey(), BaseDocument.class, null); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentIfMatchFail(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final DocumentCreateEntity createResult = collection.insertDocument(doc); + final DocumentDeleteOptions options = new DocumentDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> collection.deleteDocument(createResult.getKey(), options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocuments(ArangoCollection collection) { + DocumentCreateEntity a = 
collection.insertDocument(new BaseDocument()); + DocumentCreateEntity b = collection.insertDocument(new BaseDocument()); + MultiDocumentEntity> info = collection.deleteDocuments( + Arrays.asList(a.getKey(), b.getKey())); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).hasSize(2); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsWithRevs(ArangoCollection collection) { + DocumentCreateEntity a = collection.insertDocument(new BaseDocument()); + DocumentCreateEntity b = collection.insertDocument(new BaseDocument()); + MultiDocumentEntity> info = collection.deleteDocuments( + Arrays.asList( + JsonNodeFactory.instance.objectNode() + .put("_key", a.getKey()) + .put("_rev", a.getRev()), + JsonNodeFactory.instance.objectNode() + .put("_key", b.getKey()) + .put("_rev", "wrong") + ), new DocumentDeleteOptions().ignoreRevs(false)); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).hasSize(1); + assertThat(info.getDocuments().get(0).getKey()).isEqualTo(a.getKey()); + assertThat(info.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final DocumentDeleteEntity meta = collection.deleteDocument(createResult.getKey(), + new DocumentDeleteOptions().silent(true), BaseDocument.class); + assertThat(meta).isNotNull(); + assertThat(meta.getId()).isNull(); + assertThat(meta.getKey()).isNull(); + assertThat(meta.getRev()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsSilent(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = collection.deleteDocuments( + Collections.singletonList(createResult.getKey()), + new 
DocumentDeleteOptions().silent(true), + BaseDocument.class); + assertThat(info).isNotNull(); + assertThat(info.getDocuments()).isEmpty(); + assertThat(info.getDocumentsAndErrors()).isEmpty(); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentRefillIndexCaches(ArangoCollection collection) { + DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + DocumentDeleteEntity deleteResult = collection.deleteDocument(createResult.getKey(), + new DocumentDeleteOptions().refillIndexCaches(true)); + assertThat(deleteResult.getRev()) + .isNotNull() + .isEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsRefillIndexCaches(ArangoCollection collection) { + assumeTrue(isSingleServer()); + final DocumentCreateEntity createResult = collection.insertDocument(new BaseDocument()); + final MultiDocumentEntity> info = collection.deleteDocuments( + Collections.singletonList(createResult.getKey()), + new DocumentDeleteOptions().refillIndexCaches(true), + BaseDocument.class); + assertThat(info.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void getIndex(ArangoCollection collection) { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null); + final IndexEntity readResult = collection.getIndex(createResult.getId()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("cols") + void getIndexByKey(ArangoCollection collection) { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null); + final IndexEntity readResult = collection.getIndex(createResult.getId().split("/")[1]); + 
assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteIndex(ArangoCollection collection) { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null); + final String id = collection.deleteIndex(createResult.getId()); + assertThat(id).isEqualTo(createResult.getId()); + Throwable thrown = catchThrowable(() -> collection.db().getIndex(id)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteIndexByKey(ArangoCollection collection) { + final Collection fields = new ArrayList<>(); + fields.add("a"); + final IndexEntity createResult = collection.ensurePersistentIndex(fields, null); + final String id = collection.deleteIndex(createResult.getId().split("/")[1]); + assertThat(id).isEqualTo(createResult.getId()); + Throwable thrown = catchThrowable(() -> collection.db().getIndex(id)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void createGeoIndex(ArangoCollection collection) { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + if (isAtLeastVersion(3, 10)) { + 
assertThat(indexResult.getLegacyPolygons()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createGeoIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + assertThat(indexResult.getName()).isEqualTo(name); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getLegacyPolygons()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createGeoIndexLegacyPolygons(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + options.legacyPolygons(true); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureGeoIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + 
assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo1); + } + assertThat(indexResult.getName()).isEqualTo(name); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getLegacyPolygons()).isTrue(); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createGeo2Index(ArangoCollection collection) { + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureGeoIndex(fields, null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo2); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createGeo2IndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "geoIndex-" + rnd(); + final GeoIndexOptions options = new GeoIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureGeoIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + 
assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + if (isAtLeastVersion(3, 4)) { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo); + } else { + assertThat(indexResult.getType()).isEqualTo(IndexType.geo2); + } + assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("cols") + void createPersistentIndex(ArangoCollection collection) { + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + if (isAtLeastVersion(3, 10)) { + assertThat(indexResult.getCacheEnabled()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createPersistentIndexCacheEnabled(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, new PersistentIndexOptions().cacheEnabled(true)); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + 
assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + assertThat(indexResult.getCacheEnabled()).isTrue(); + } + + @ParameterizedTest + @MethodSource("cols") + void createPersistentIndexStoredValues(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, new PersistentIndexOptions().storedValues("v1", "v2")); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getDeduplicate()).isTrue(); + assertThat(indexResult.getCacheEnabled()).isFalse(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + } + + @ParameterizedTest + @MethodSource("cols") + void createPersistentIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + 
final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getSparse()).isFalse(); + assertThat(indexResult.getType()).isEqualTo(IndexType.persistent); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("cols") + void createZKDIndex(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 9)); + collection.truncate(); + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + final Collection fields = Arrays.asList(f1, f2); + + final IndexEntity indexResult = collection.ensureZKDIndex(fields, null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.zkd); + assertThat(indexResult.getUnique()).isFalse(); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void createZKDIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 9)); + collection.truncate(); + + String name = "ZKDIndex-" + rnd(); + final ZKDIndexOptions options = + new ZKDIndexOptions().name(name).fieldValueTypes(ZKDIndexOptions.FieldValueTypes.DOUBLE); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = 
Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensureZKDIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getFields()).contains(f2); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.zkd); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void createMDIndex(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate(); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDIndex(Arrays.asList(f1, f2), null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1, f2); + assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getMinLength()).isNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdi); + assertThat(indexResult.getUnique()).isFalse(); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void createMDIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate(); + + String name = "MDIndex-" + rnd(); + final MDIndexOptions options = new MDIndexOptions() + .name(name) + .unique(false) + .fieldValueTypes(MDIFieldValueTypes.DOUBLE) + .estimates(false) + .sparse(true) + .storedValues(Arrays.asList("v1", "v2")); + + String 
f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDIndex(Arrays.asList(f1, f2), options); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdi); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getName()).isEqualTo(name); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getEstimates()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + assertThat(indexResult.getFields()).contains(f1, f2); + assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void createMDPrefixedIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 12)); + collection.truncate(); + + String name = "MDPrefixedIndex-" + rnd(); + final MDPrefixedIndexOptions options = new MDPrefixedIndexOptions() + .name(name) + .unique(false) + .fieldValueTypes(MDIFieldValueTypes.DOUBLE) + .estimates(false) + .sparse(true) + .storedValues(Arrays.asList("v1", "v2")) + .prefixFields(Arrays.asList("p1", "p2")); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final IndexEntity indexResult = collection.ensureMDPrefixedIndex(Arrays.asList(f1, f2), options); + assertThat(indexResult.getType()).isEqualTo(IndexType.mdiPrefixed); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getName()).isEqualTo(name); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getEstimates()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getStoredValues()) + .hasSize(2) + .contains("v1", "v2"); + assertThat(indexResult.getFields()).contains(f1, f2); + 
assertThat(indexResult.getFieldValueTypes()).isEqualTo(MDIFieldValueTypes.DOUBLE); + assertThat(indexResult.getPrefixFields()).contains("p1", "p2"); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void indexEstimates(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 8)); + assumeTrue(isSingleServer()); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.estimates(true); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getEstimates()).isTrue(); + assertThat(indexResult.getSelectivityEstimate()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void indexEstimatesFalse(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 8)); + assumeTrue(isSingleServer()); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.estimates(false); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getEstimates()).isFalse(); + assertThat(indexResult.getSelectivityEstimate()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void indexDeduplicate(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 8)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.deduplicate(true); + + String f1 = "field-" + rnd(); + 
String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getDeduplicate()).isTrue(); + } + + @ParameterizedTest + @MethodSource("cols") + void indexDeduplicateFalse(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 8)); + + String name = "persistentIndex-" + rnd(); + final PersistentIndexOptions options = new PersistentIndexOptions(); + options.name(name); + options.deduplicate(false); + + String f1 = "field-" + rnd(); + String f2 = "field-" + rnd(); + + final Collection fields = Arrays.asList(f1, f2); + final IndexEntity indexResult = collection.ensurePersistentIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getDeduplicate()).isFalse(); + } + + @ParameterizedTest + @MethodSource("cols") + void createFulltextIndex(ArangoCollection collection) { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + final IndexEntity indexResult = collection.ensureFulltextIndex(fields, null); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.fulltext); + assertThat(indexResult.getUnique()).isFalse(); + } + + @ParameterizedTest + @MethodSource("cols") + void createFulltextIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "fulltextIndex-" + rnd(); + final FulltextIndexOptions options = new FulltextIndexOptions(); + options.name(name); + + String f = "field-" + rnd(); + final Collection fields = Collections.singletonList(f); + final IndexEntity indexResult = 
collection.ensureFulltextIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getConstraint()).isNull(); + assertThat(indexResult.getFields()).contains(f); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.fulltext); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("cols") + void createTtlIndexWithoutOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + final Collection fields = new ArrayList<>(); + fields.add("a"); + + Throwable thrown = catchThrowable(() -> collection.ensureTtlIndex(fields, null)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(400); + assertThat(e.getErrorNum()).isEqualTo(10); + assertThat(e.getMessage()).contains("expireAfter attribute must be a number"); + } + + @ParameterizedTest + @MethodSource("cols") + void createTtlIndexWithOptions(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 5)); + + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + + String name = "ttlIndex-" + rnd(); + final TtlIndexOptions options = new TtlIndexOptions(); + options.name(name); + options.expireAfter(3600); + + final IndexEntity indexResult = collection.ensureTtlIndex(fields, options); + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getFields()).contains(f1); + assertThat(indexResult.getId()).startsWith(COLLECTION_NAME); + assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getType()).isEqualTo(IndexType.ttl); + assertThat(indexResult.getExpireAfter()).isEqualTo(3600); + assertThat(indexResult.getName()).isEqualTo(name); + + // revert 
changes + collection.deleteIndex(indexResult.getId()); + } + + @ParameterizedTest + @MethodSource("cols") + void getIndexes(ArangoCollection collection) { + String f1 = "field-" + rnd(); + final Collection fields = Collections.singletonList(f1); + collection.ensurePersistentIndex(fields, null); + long matchingIndexes = + collection.getIndexes().stream().filter(i -> i.getType() == IndexType.persistent).filter(i -> i.getFields().contains(f1)).count(); + assertThat(matchingIndexes).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("edges") + void getEdgeIndex(ArangoCollection edgeCollection) { + Collection indexes = edgeCollection.getIndexes(); + long primaryIndexes = indexes.stream().filter(i -> i.getType() == IndexType.primary).count(); + long edgeIndexes = indexes.stream().filter(i -> i.getType() == IndexType.primary).count(); + assertThat(primaryIndexes).isEqualTo(1L); + assertThat(edgeIndexes).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("cols") + void exists(ArangoCollection collection) { + assertThat(collection.exists()).isTrue(); + assertThat(collection.db().collection(COLLECTION_NAME + "no").exists()).isFalse(); + } + + @ParameterizedTest + @MethodSource("cols") + void truncate(ArangoCollection collection) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + collection.insertDocument(doc, null); + final BaseDocument readResult = collection.getDocument(doc.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(doc.getKey()); + final CollectionEntity truncateResult = collection.truncate(); + assertThat(truncateResult).isNotNull(); + assertThat(truncateResult.getId()).isNotNull(); + final BaseDocument document = collection.getDocument(doc.getKey(), BaseDocument.class, null); + assertThat(document).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getCount(ArangoCollection collection) { + Long initialCount = collection.count().getCount(); + 
collection.insertDocument(RawJson.of("{}")); + final CollectionPropertiesEntity count = collection.count(); + assertThat(count.getCount()).isEqualTo(initialCount + 1L); + } + + @ParameterizedTest + @MethodSource("cols") + void documentExists(ArangoCollection collection) { + final Boolean existsNot = collection.documentExists(rnd(), null); + assertThat(existsNot).isFalse(); + + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson); + final Boolean exists = collection.documentExists(key, null); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("cols") + void documentExistsIfMatch(ArangoCollection collection) { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + final DocumentCreateEntity createResult = collection.insertDocument(rawJson); + final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch(createResult.getRev()); + final Boolean exists = collection.documentExists(key, options); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("cols") + void documentExistsIfMatchFail(ArangoCollection collection) { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson); + final DocumentExistsOptions options = new DocumentExistsOptions().ifMatch("no"); + final Boolean exists = collection.documentExists(key, options); + assertThat(exists).isFalse(); + } + + @ParameterizedTest + @MethodSource("cols") + void documentExistsIfNoneMatch(ArangoCollection collection) { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + collection.insertDocument(rawJson); + final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch("no"); + final Boolean exists = collection.documentExists(key, options); + assertThat(exists).isTrue(); + } + + @ParameterizedTest + @MethodSource("cols") + void 
documentExistsIfNoneMatchFail(ArangoCollection collection) { + String key = rnd(); + RawJson rawJson = RawJson.of("{\"_key\":\"" + key + "\"}"); + final DocumentCreateEntity createResult = collection.insertDocument(rawJson); + final DocumentExistsOptions options = new DocumentExistsOptions().ifNoneMatch(createResult.getRev()); + final Boolean exists = collection.documentExists(key, options); + assertThat(exists).isFalse(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocuments(ArangoCollection collection) { + final Collection values = Arrays.asList(new BaseDocument(), new BaseDocument(), + new BaseDocument()); + + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsReturnNewUserData(ArangoCollection collection) { + Cat a = new Cat(); + a.setKey(UUID.randomUUID().toString()); + a.setName("a"); + + Cat b = new Cat(); + b.setKey(UUID.randomUUID().toString()); + b.setName("b"); + + final List values = Arrays.asList(a, b); + MultiDocumentEntity> res = + collection.insertDocuments(values, new DocumentCreateOptions().returnNew(true), Cat.class); + assertThat(res).isNotNull(); + assertThat(res.getDocuments()) + .hasSize(2) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(a.getKey()); + assertThat(d.getNew().getName()).isEqualTo(a.getName()); + }) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(b.getKey()); + assertThat(d.getNew().getName()).isEqualTo(b.getName()); + }); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOverwriteModeUpdate(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 7)); + + final BaseDocument doc1 = new BaseDocument(UUID.randomUUID().toString()); + doc1.addAttribute("foo", "a"); + 
final DocumentCreateEntity meta1 = collection.insertDocument(doc1); + + final BaseDocument doc2 = new BaseDocument(UUID.randomUUID().toString()); + doc2.addAttribute("foo", "a"); + final DocumentCreateEntity meta2 = collection.insertDocument(doc2); + + doc1.addAttribute("bar", "b"); + doc2.addAttribute("bar", "b"); + + final MultiDocumentEntity> repsert = + collection.insertDocuments(Arrays.asList(doc1, doc2), + new DocumentCreateOptions().overwriteMode(OverwriteMode.update).returnNew(true), BaseDocument.class); + assertThat(repsert).isNotNull(); + assertThat(repsert.getDocuments()).hasSize(2); + assertThat(repsert.getErrors()).isEmpty(); + for (final DocumentCreateEntity documentCreateEntity : repsert.getDocuments()) { + assertThat(documentCreateEntity.getRev()).isNotEqualTo(meta1.getRev()); + assertThat(documentCreateEntity.getRev()).isNotEqualTo(meta2.getRev()); + assertThat(documentCreateEntity.getNew().getAttribute("foo")).isEqualTo("a"); + assertThat(documentCreateEntity.getNew().getAttribute("bar")).isEqualTo("b"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsJson(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(RawJson.of("{}")); + values.add(RawJson.of("{}")); + values.add(RawJson.of("{}")); + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsRawData(ArangoCollection collection) { + final RawData values = RawJson.of("[{},{},{}]"); + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + 
assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsRawDataReturnNew(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"aaa\":33},{\"aaa\":33},{\"aaa\":33}]"); + final MultiDocumentEntity> docs = + collection.insertDocuments(values, new DocumentCreateOptions().returnNew(true)); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + + for (final DocumentCreateEntity doc : docs.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("aaa")).isTrue(); + assertThat(jn.get("aaa").intValue()).isEqualTo(33); + } + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsOne(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(1); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsEmpty(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).isEmpty(); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsReturnNew(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + 
values.add(new BaseDocument()); + values.add(new BaseDocument()); + final DocumentCreateOptions options = new DocumentCreateOptions().returnNew(true); + final MultiDocumentEntity> docs = collection.insertDocuments(values, + options, BaseDocument.class); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(3); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).isEmpty(); + for (final DocumentCreateEntity doc : docs.getDocuments()) { + assertThat(doc.getNew()).isNotNull(); + final BaseDocument baseDocument = doc.getNew(); + assertThat(baseDocument.getId()).isNotNull(); + assertThat(baseDocument.getKey()).isNotNull(); + assertThat(baseDocument.getRevision()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsFail(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final MultiDocumentEntity docs = collection.insertDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getDocuments()).isNotNull(); + assertThat(docs.getDocuments()).hasSize(2); + assertThat(docs.getErrors()).isNotNull(); + assertThat(docs.getErrors()).hasSize(1); + assertThat(docs.getErrors().iterator().next().getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocuments(ArangoCollection collection) { + final Collection values = Arrays.asList(new BaseDocument(), new BaseDocument(), + new BaseDocument()); + + final DocumentImportEntity docs = collection.importDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(values.size()); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + 
@ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonList(ArangoCollection collection) { + final Collection values = Arrays.asList( + RawJson.of("{}"), + RawJson.of("{}"), + RawJson.of("{}") + ); + + final DocumentImportEntity docs = collection.importDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(values.size()); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsDuplicateDefaultError(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsDuplicateError(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.error)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void 
importDocumentsDuplicateIgnore(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.ignore)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isEqualTo(1); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsDuplicateReplace(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.replace)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsDuplicateUpdate(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, + new DocumentImportOptions().onDuplicate(OnDuplicate.update)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + 
assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsCompleteFail(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + Throwable thrown = catchThrowable(() -> collection.importDocuments(values, + new DocumentImportOptions().complete(true))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsDetails(ArangoCollection collection) { + String k1 = rnd(); + String k2 = rnd(); + + final Collection values = Arrays.asList(new BaseDocument(k1), new BaseDocument(k2), + new BaseDocument(k2)); + + final DocumentImportEntity docs = collection.importDocuments(values, new DocumentImportOptions().details(true)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).hasSize(1); + assertThat(docs.getDetails().iterator().next()).contains("unique constraint violated"); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsOverwriteFalse(ArangoCollection collection) { + collection.insertDocument(new BaseDocument()); + Long initialCount = collection.count().getCount(); + + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + collection.importDocuments(values, new DocumentImportOptions().overwrite(false)); + assertThat(collection.count().getCount()).isEqualTo(initialCount + 2L); + } + + @ParameterizedTest + @MethodSource("cols") + void 
importDocumentsOverwriteTrue(ArangoCollection collection) { + collection.insertDocument(new BaseDocument()); + + final Collection values = new ArrayList<>(); + values.add(new BaseDocument()); + values.add(new BaseDocument()); + collection.importDocuments(values, new DocumentImportOptions().overwrite(true)); + assertThat(collection.count().getCount()).isEqualTo(2L); + } + + @ParameterizedTest + @MethodSource("edges") + void importDocumentsFromToPrefix(ArangoCollection edgeCollection) { + final Collection values = new ArrayList<>(); + final String[] keys = {rnd(), rnd()}; + for (String s : keys) { + values.add(new BaseEdgeDocument(s, "from", "to")); + } + assertThat(values).hasSize(keys.length); + + final DocumentImportEntity importResult = edgeCollection.importDocuments(values, + new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); + assertThat(importResult).isNotNull(); + assertThat(importResult.getCreated()).isEqualTo(values.size()); + for (String key : keys) { + final BaseEdgeDocument doc = edgeCollection.getDocument(key, BaseEdgeDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getFrom()).isEqualTo("foo/from"); + assertThat(doc.getTo()).isEqualTo("bar/to"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJson(ArangoCollection collection) throws JsonProcessingException { + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDuplicateDefaultError(ArangoCollection collection) 
throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDuplicateError(ArangoCollection collection) throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.error)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDuplicateIgnore(ArangoCollection collection) throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.ignore)); + 
assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isEqualTo(1); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDuplicateReplace(ArangoCollection collection) throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.replace)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDuplicateUpdate(ArangoCollection collection) throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().onDuplicate(OnDuplicate.update)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isZero(); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isEqualTo(1); + assertThat(docs.getDetails()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void 
importDocumentsJsonCompleteFail(ArangoCollection collection) { + final String values = "[{\"_key\":\"1\"},{\"_key\":\"2\"},{\"_key\":\"2\"}]"; + Throwable thrown = catchThrowable(() -> collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().complete(true))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonDetails(ArangoCollection collection) throws JsonProcessingException { + String k1 = rnd(); + String k2 = rnd(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", k1), + Collections.singletonMap("_key", k2), Collections.singletonMap("_key", k2))); + + final DocumentImportEntity docs = collection.importDocuments(RawJson.of(values), + new DocumentImportOptions().details(true)); + assertThat(docs).isNotNull(); + assertThat(docs.getCreated()).isEqualTo(2); + assertThat(docs.getEmpty()).isZero(); + assertThat(docs.getErrors()).isEqualTo(1); + assertThat(docs.getIgnored()).isZero(); + assertThat(docs.getUpdated()).isZero(); + assertThat(docs.getDetails()).hasSize(1); + assertThat(docs.getDetails().iterator().next()).contains("unique constraint violated"); + } + + @ParameterizedTest + @MethodSource("cols") + void importDocumentsJsonOverwriteFalse(ArangoCollection collection) throws JsonProcessingException { + collection.insertDocument(new BaseDocument()); + Long initialCount = collection.count().getCount(); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + collection.importDocuments(RawJson.of(values), new DocumentImportOptions().overwrite(false)); + assertThat(collection.count().getCount()).isEqualTo(initialCount + 2L); + } + + @ParameterizedTest + @MethodSource("cols") + void 
importDocumentsJsonOverwriteTrue(ArangoCollection collection) throws JsonProcessingException { + collection.insertDocument(new BaseDocument()); + + final String values = mapper.writeValueAsString(Arrays.asList(Collections.singletonMap("_key", rnd()), + Collections.singletonMap("_key", rnd()))); + collection.importDocuments(RawJson.of(values), new DocumentImportOptions().overwrite(true)); + assertThat(collection.count().getCount()).isEqualTo(2L); + } + + @ParameterizedTest + @MethodSource("edges") + void importDocumentsJsonFromToPrefix(ArangoCollection edgeCollection) throws JsonProcessingException { + String k1 = UUID.randomUUID().toString(); + String k2 = UUID.randomUUID().toString(); + + final String[] keys = {k1, k2}; + + final String values = mapper.writeValueAsString(Arrays.asList(new MapBuilder().put("_key", k1).put("_from", + "from").put("_to", "to").get(), new MapBuilder().put("_key", k2).put("_from", "from").put("_to", "to").get())); + + final DocumentImportEntity importResult = edgeCollection.importDocuments(RawJson.of(values), + new DocumentImportOptions().fromPrefix("foo").toPrefix("bar")); + assertThat(importResult).isNotNull(); + assertThat(importResult.getCreated()).isEqualTo(2); + for (String key : keys) { + final BaseEdgeDocument doc = edgeCollection.getDocument(key, BaseEdgeDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getFrom()).isEqualTo("foo/from"); + assertThat(doc.getTo()).isEqualTo("bar/to"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsByKey(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + collection.insertDocuments(values); + final Collection keys = new ArrayList<>(); + keys.add("1"); + keys.add("2"); + final MultiDocumentEntity> 
deleteResult = collection.deleteDocuments(keys); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsRawDataByKeyReturnOld(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"_key\":\"1\"},{\"_key\":\"2\"}]"); + collection.insertDocuments(values); + final RawData keys = RawJson.of("[\"1\",\"2\"]"); + MultiDocumentEntity> deleteResult = collection.deleteDocuments(keys, + new DocumentDeleteOptions().returnOld(true)); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + assertThat(i.getOld()).isNotNull().isInstanceOf(RawJson.class); + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) i.getOld()).get()); + assertThat(jn.get("_key").asText()).isEqualTo(i.getKey()); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsByDocuments(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + collection.insertDocuments(values); + MultiDocumentEntity> deleteResult = collection.deleteDocuments(values); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(2); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isIn("1", "2"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + 
void deleteDocumentsByKeyOne(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values); + final Collection keys = new ArrayList<>(); + keys.add("1"); + final MultiDocumentEntity> deleteResult = collection.deleteDocuments(keys); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(1); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isEqualTo("1"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsByDocumentOne(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values); + final MultiDocumentEntity> deleteResult = collection.deleteDocuments(values); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).hasSize(1); + for (final DocumentDeleteEntity i : deleteResult.getDocuments()) { + assertThat(i.getKey()).isEqualTo("1"); + } + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsEmpty(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + collection.insertDocuments(values); + final Collection keys = new ArrayList<>(); + final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsByKeyNotExisting(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + collection.insertDocuments(values); + final Collection 
keys = Arrays.asList(rnd(), rnd()); + + final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("cols") + void deleteDocumentsByDocumentsNotExisting(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("2"); + values.add(e); + } + final MultiDocumentEntity deleteResult = collection.deleteDocuments(values); + assertThat(deleteResult).isNotNull(); + assertThat(deleteResult.getDocuments()).isEmpty(); + assertThat(deleteResult.getErrors()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocuments(ArangoCollection collection) { + final Collection values = Arrays.asList(new BaseDocument(rnd()), new BaseDocument(rnd())); + collection.insertDocuments(values); + values.forEach(it -> it.addAttribute("a", "test")); + + final MultiDocumentEntity updateResult = collection.updateDocuments(values); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsWithDifferentReturnType(ArangoCollection collection) { + List keys = + IntStream.range(0, 3).mapToObj(it -> "key-" + UUID.randomUUID()).collect(Collectors.toList()); + List docs = + keys.stream().map(BaseDocument::new).peek(it -> it.addAttribute("a", "test")).collect(Collectors.toList()); + + collection.insertDocuments(docs); + + List> modifiedDocs = docs.stream().peek(it -> it.addAttribute("b", "test")).map(it -> { + Map map = new HashMap<>(); + map.put("_key", it.getKey()); + map.put("a", it.getAttribute("a")); + map.put("b", it.getAttribute("b")); + return 
map; + }).collect(Collectors.toList()); + + final MultiDocumentEntity> updateResult = + collection.updateDocuments(modifiedDocs, new DocumentUpdateOptions().returnNew(true), BaseDocument.class); + assertThat(updateResult.getDocuments()).hasSize(3); + assertThat(updateResult.getErrors()).isEmpty(); + assertThat(updateResult.getDocuments().stream()).map(DocumentUpdateEntity::getNew).allMatch(it -> it.getAttribute("a").equals("test")).allMatch(it -> it.getAttribute("b").equals("test")); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsOne(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values); + final Collection updatedValues = new ArrayList<>(); + final BaseDocument first = values.iterator().next(); + first.addAttribute("a", "test"); + updatedValues.add(first); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsEmpty(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity updateResult = collection.updateDocuments(values); + assertThat(updateResult.getDocuments()).isEmpty(); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsWithoutKey(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + } + collection.insertDocuments(values); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + updatedValues.add(new BaseDocument()); + final MultiDocumentEntity updateResult = 
collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsJson(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(RawJson.of("{\"_key\":\"1\"}")); + values.add(RawJson.of("{\"_key\":\"2\"}")); + collection.insertDocuments(values); + + final Collection updatedValues = new ArrayList<>(); + updatedValues.add(RawJson.of("{\"_key\":\"1\", \"foo\":\"bar\"}")); + updatedValues.add(RawJson.of("{\"_key\":\"2\", \"foo\":\"bar\"}")); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsRawData(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void updateDocumentsRawDataReturnNew(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + MultiDocumentEntity> updateResult = + collection.updateDocuments(updatedValues, new DocumentUpdateOptions().returnNew(true)); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + for (DocumentUpdateEntity doc : 
updateResult.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("foo")).isTrue(); + assertThat(jn.get("foo").textValue()).isEqualTo("bar"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocuments(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + values.add(new BaseDocument("2")); + } + collection.insertDocuments(values); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsOne(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + final BaseDocument e = new BaseDocument(UUID.randomUUID().toString()); + e.setKey("1"); + values.add(e); + } + collection.insertDocuments(values); + final Collection updatedValues = new ArrayList<>(); + final BaseDocument first = values.iterator().next(); + first.addAttribute("a", "test"); + updatedValues.add(first); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsEmpty(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + final MultiDocumentEntity updateResult = collection.updateDocuments(values); + assertThat(updateResult.getDocuments()).isEmpty(); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void 
replaceDocumentsWithoutKey(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + { + values.add(new BaseDocument("1")); + } + collection.insertDocuments(values); + final Collection updatedValues = new ArrayList<>(); + for (final BaseDocument i : values) { + i.addAttribute("a", "test"); + updatedValues.add(i); + } + updatedValues.add(new BaseDocument()); + final MultiDocumentEntity updateResult = collection.updateDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(1); + assertThat(updateResult.getErrors()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsJson(ArangoCollection collection) { + final Collection values = new ArrayList<>(); + values.add(RawJson.of("{\"_key\":\"1\"}")); + values.add(RawJson.of("{\"_key\":\"2\"}")); + collection.insertDocuments(values); + + final Collection updatedValues = new ArrayList<>(); + updatedValues.add(RawJson.of("{\"_key\":\"1\", \"foo\":\"bar\"}")); + updatedValues.add(RawJson.of("{\"_key\":\"2\", \"foo\":\"bar\"}")); + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsRawData(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + collection.insertDocuments(values); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + final MultiDocumentEntity updateResult = collection.replaceDocuments(updatedValues); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("cols") + void replaceDocumentsRawDataReturnNew(ArangoCollection collection) { + final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); + 
collection.insertDocuments(values); + + final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + + "\"foo\":\"bar\"}]"); + MultiDocumentEntity> updateResult = + collection.replaceDocuments(updatedValues, new DocumentReplaceOptions().returnNew(true)); + assertThat(updateResult.getDocuments()).hasSize(2); + assertThat(updateResult.getErrors()).isEmpty(); + for (DocumentUpdateEntity doc : updateResult.getDocuments()) { + RawData d = doc.getNew(); + assertThat(d) + .isNotNull() + .isInstanceOf(RawJson.class); + + JsonNode jn = SerdeUtils.INSTANCE.parseJson(((RawJson) d).get()); + assertThat(jn.has("foo")).isTrue(); + assertThat(jn.get("foo").textValue()).isEqualTo("bar"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void getInfo(ArangoCollection collection) { + final CollectionEntity result = collection.getInfo(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("cols") + void getPropeties(ArangoCollection collection) { + final CollectionPropertiesEntity result = collection.getProperties(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + assertThat(result.getCount()).isNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void changeProperties(ArangoCollection collection) { + assumeTrue(isCluster()); + final CollectionPropertiesEntity properties = collection.getProperties(); + assertThat(properties.getWaitForSync()).isNotNull(); + assertThat(properties.getSchema()).isNull(); + + String schemaRule = ("{ " + " \"properties\": {" + " \"number\": {" + " " + + " \"type\": \"number\"" + " }" + " }" + " }").replaceAll("\\s", ""); + String schemaMessage = "The document has problems!"; + + CollectionPropertiesOptions updatedOptions = + new CollectionPropertiesOptions() + .cacheEnabled(!properties.getCacheEnabled()) + .computedValues(new ComputedValue() + .name("foo") + .expression("RETURN 11") + .overwrite(false) + 
.computeOn(ComputedValue.ComputeOn.insert) + .keepNull(false) + .failOnWarning(true)) + .replicationFactor(ReplicationFactor.of(3)) + .schema(new CollectionSchema().setLevel(CollectionSchema.Level.NEW).setMessage(schemaMessage).setRule(schemaRule)) + .waitForSync(!properties.getWaitForSync()) + .writeConcern(2); + + final CollectionPropertiesEntity changedProperties = collection.changeProperties(updatedOptions); + assertThat(changedProperties.getCacheEnabled()).isEqualTo(updatedOptions.getCacheEnabled()); + assertThat(changedProperties.getComputedValues()) + .hasSize(1) + .contains(updatedOptions.getComputedValues().get(0)); + assertThat(changedProperties.getReplicationFactor().get()).isEqualTo(updatedOptions.getReplicationFactor().get()); + assertThat(changedProperties.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(changedProperties.getSchema().getMessage()).isEqualTo(schemaMessage); + assertThat(changedProperties.getSchema().getRule()).isEqualTo(schemaRule); + assertThat(changedProperties.getWaitForSync()).isEqualTo(updatedOptions.getWaitForSync()); + assertThat(changedProperties.getWriteConcern()).isEqualTo(updatedOptions.getWriteConcern()); + + // revert changes + CollectionPropertiesOptions revertOptions = new CollectionPropertiesOptions() + .cacheEnabled(properties.getCacheEnabled()) + .computedValues() + .replicationFactor(properties.getReplicationFactor()) + .schema(properties.getSchema()) + .waitForSync(properties.getWaitForSync()) + .writeConcern(properties.getWriteConcern()); + collection.changeProperties(revertOptions); + } + + @ParameterizedTest + @MethodSource("cols") + void rename(ArangoCollection collection) { + assumeTrue(isSingleServer()); + ArangoDatabase db = collection.db(); + + if (!db.collection("c1").exists()) { + db.collection("c1").create(); + } + + if (db.collection("c2").exists()) { + db.collection("c2").drop(); + } + + final CollectionEntity result = db.collection("c1").rename("c2"); + 
assertThat(result).isNotNull(); + assertThat(result.getName()).isEqualTo("c2"); + + final CollectionEntity info = db.collection("c2").getInfo(); + assertThat(info.getName()).isEqualTo("c2"); + + Throwable thrown = catchThrowable(() -> db.collection("c1").getInfo()); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + } + + @ParameterizedTest + @MethodSource("cols") + void responsibleShard(ArangoCollection collection) { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 5)); + ShardEntity shard = collection.getResponsibleShard(new BaseDocument("testKey")); + assertThat(shard).isNotNull(); + assertThat(shard.getShardId()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void getRevision(ArangoCollection collection) { + final CollectionRevisionEntity result = collection.getRevision(); + assertThat(result).isNotNull(); + assertThat(result.getName()).isEqualTo(COLLECTION_NAME); + assertThat(result.getRevision()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("cols") + void keyWithSpecialCharacter(ArangoCollection collection) { + final String key = "myKey_-:.@()+,=;$!*'%-" + UUID.randomUUID(); + collection.insertDocument(new BaseDocument(key)); + final BaseDocument doc = collection.getDocument(key, BaseDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("cols") + void alreadyUrlEncodedkey(ArangoCollection collection) { + final String key = "http%3A%2F%2Fexample.com%2F-" + UUID.randomUUID(); + collection.insertDocument(new BaseDocument(key)); + final BaseDocument doc = collection.getDocument(key, BaseDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + + @ParameterizedTest + @MethodSource("cols") + void grantAccessRW(ArangoCollection collection) { + ArangoDB arangoDB = collection.db().arango(); + try { + 
arangoDB.createUser("user1", "1234", null); + collection.grantAccess("user1", Permissions.RW); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void grantAccessRO(ArangoCollection collection) { + ArangoDB arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null); + collection.grantAccess("user1", Permissions.RO); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void grantAccessNONE(ArangoCollection collection) { + ArangoDB arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null); + collection.grantAccess("user1", Permissions.NONE); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void grantAccessUserNotFound(ArangoCollection collection) { + Throwable thrown = catchThrowable(() -> collection.grantAccess("user1", Permissions.RW)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void revokeAccess(ArangoCollection collection) { + ArangoDB arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null); + collection.grantAccess("user1", Permissions.NONE); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void revokeAccessUserNotFound(ArangoCollection collection) { + Throwable thrown = catchThrowable(() -> collection.grantAccess("user1", Permissions.NONE)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void resetAccess(ArangoCollection collection) { + ArangoDB arangoDB = collection.db().arango(); + try { + arangoDB.createUser("user1", "1234", null); + collection.resetAccess("user1"); + } finally { + arangoDB.deleteUser("user1"); + } + } + + @ParameterizedTest + @MethodSource("cols") + void resetAccessUserNotFound(ArangoCollection collection) 
{ + Throwable thrown = catchThrowable(() -> collection.resetAccess("user1")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("cols") + void getPermissions(ArangoCollection collection) { + assertThat(collection.getPermissions("root")).isEqualTo(Permissions.RW); + } + + @ParameterizedTest + @MethodSource("cols") + void annotationsInParamsAndMethods(ArangoCollection collection) { + assumeTrue(collection.getSerde().getUserSerde() instanceof JacksonSerde, "JacksonSerde only"); + AnnotatedEntity entity = new AnnotatedEntity(UUID.randomUUID().toString()); + AnnotatedEntity doc = collection.insertDocument(entity, new DocumentCreateOptions().returnNew(true)).getNew(); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(entity.getKey()); + assertThat(doc.getId()).isNotNull(); + assertThat(doc.getRev()).isNotNull(); + } + + @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "type") + public interface Animal { + String getKey(); + + String getName(); + } + + public static class Dog implements Animal { + + @Key + private String key; + private String name; + + public Dog() { + } + + public Dog(String key, String name) { + this.key = key; + this.name = name; + } + + @Override + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class Cat implements Animal { + @Key + private String key; + private String name; + + public Cat() { + } + + public Cat(String key, String name) { + this.key = key; + this.name = name; + } + + @Override + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class TestUpdateEntity { + private String 
a, b; + + public String getA() { + return a; + } + + public String getB() { + return b; + } + } + + public static class TestUpdateEntitySerializeNullFalse { + private String a, b; + + @JsonInclude(JsonInclude.Include.NON_NULL) + public String getA() { + return a; + } + + @JsonInclude(JsonInclude.Include.NON_NULL) + public String getB() { + return b; + } + } + + public static class AnnotatedEntity { + + private final String key; + private String id; + private String rev; + + public AnnotatedEntity(@Key String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public String getId() { + return id; + } + + @Id + public void setId(String id) { + this.id = id; + } + + public String getRev() { + return rev; + } + + @Rev + public void setRev(String rev) { + this.rev = rev; + } + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java b/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java new file mode 100644 index 000000000..c017ef718 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java @@ -0,0 +1,49 @@ +package com.arangodb; + +import com.arangodb.http.HttpProtocolConfig; +import com.arangodb.internal.ArangoDefaults; +import com.arangodb.internal.config.ArangoConfig; +import org.junit.jupiter.api.Test; + +import javax.net.ssl.SSLContext; + +import java.security.NoSuchAlgorithmException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ArangoConfigTest { + @Test + void ArangoConfigDefaultValues() throws NoSuchAlgorithmException { + ArangoConfig cfg = new ArangoConfig(); + assertThat(cfg.getHosts()).isEqualTo(ArangoDefaults.DEFAULT_HOSTS); + assertThat(cfg.getProtocol()).isEqualTo(Protocol.HTTP2_JSON); + assertThat(cfg.getTimeout()).isEqualTo(ArangoDefaults.DEFAULT_TIMEOUT); + assertThat(cfg.getUser()).isEqualTo(ArangoDefaults.DEFAULT_USER); + assertThat(cfg.getPassword()).isNull(); + assertThat(cfg.getJwt()).isNull(); + 
assertThat(cfg.getUseSsl()).isEqualTo(ArangoDefaults.DEFAULT_USE_SSL); + assertThat(cfg.getSslContext()).isEqualTo(SSLContext.getDefault()); + assertThat(cfg.getVerifyHost()).isEqualTo(ArangoDefaults.DEFAULT_VERIFY_HOST); + assertThat(cfg.getChunkSize()).isEqualTo(ArangoDefaults.DEFAULT_CHUNK_SIZE); + assertThat(cfg.getMaxConnections()).isEqualTo(ArangoDefaults.MAX_CONNECTIONS_HTTP2_DEFAULT); + assertThat(cfg.getConnectionTtl()).isEqualTo(ArangoDefaults.DEFAULT_CONNECTION_TTL_HTTP); + assertThat(cfg.getKeepAliveInterval()).isNull(); + assertThat(cfg.getAcquireHostList()).isEqualTo(ArangoDefaults.DEFAULT_ACQUIRE_HOST_LIST); + assertThat(cfg.getAcquireHostListInterval()).isEqualTo(ArangoDefaults.DEFAULT_ACQUIRE_HOST_LIST_INTERVAL); + assertThat(cfg.getLoadBalancingStrategy()).isEqualTo(ArangoDefaults.DEFAULT_LOAD_BALANCING_STRATEGY); + assertThat(cfg.getResponseQueueTimeSamples()).isEqualTo(ArangoDefaults.DEFAULT_RESPONSE_QUEUE_TIME_SAMPLES); + assertThat(cfg.getAsyncExecutor()).isNull(); + assertThat(cfg.getCompression()).isEqualTo(ArangoDefaults.DEFAULT_COMPRESSION); + assertThat(cfg.getCompressionThreshold()).isEqualTo(ArangoDefaults.DEFAULT_COMPRESSION_THRESHOLD); + assertThat(cfg.getCompressionLevel()).isEqualTo(ArangoDefaults.DEFAULT_COMPRESSION_LEVEL); + assertThat(cfg.getProtocolConfig()).isNull(); + assertThat(cfg.getSerdeProviderClass()).isNull(); + } + + @Test + void HttpProtocolConfigDefaultValues() { + HttpProtocolConfig cfg = HttpProtocolConfig.builder().build(); + assertThat(cfg.getVertx()).isNull(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoCursorTest.java b/test-functional/src/test/java/com/arangodb/ArangoCursorTest.java new file mode 100644 index 000000000..1d542dae8 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoCursorTest.java @@ -0,0 +1,132 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.model.AqlQueryOptions; +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoCursorTest extends BaseJunit5 { + + @BeforeAll + static void init() { + initDB(); + } + + @ParameterizedTest + @MethodSource("dbs") + void firstStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final Optional first = cursor.stream().findFirst(); + assertThat(first).isPresent(); + assertThat(first.get().isInt()).isTrue(); + assertThat(first.get().asLong()).isZero(); + } + + @ParameterizedTest + @MethodSource("dbs") + void next(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class, new AqlQueryOptions().batchSize(5)); + while (cursor.hasNext()) { + cursor.next(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void mapFilterCountStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final long count = 
cursor.stream().map(JsonNode::asLong).filter(t -> t < 50).count(); + assertThat(count).isEqualTo(50L); + } + + @ParameterizedTest + @MethodSource("dbs") + void mapFilterCollectIntoSetStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final Set target = cursor.stream().map(JsonNode::asLong).filter(t -> t < 50).collect(Collectors.toSet()); + assertThat(target) + .isNotNull() + .hasSize(50); + } + + @ParameterizedTest + @MethodSource("dbs") + void forEach(ArangoDatabase db) { + final AtomicLong i = new AtomicLong(0L); + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + cursor.forEach(t -> assertThat(t.asLong()).isEqualTo(i.getAndIncrement())); + } + + @ParameterizedTest + @MethodSource("dbs") + void mapForeachStream(ArangoDatabase db) { + final AtomicLong i = new AtomicLong(0L); + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + cursor.stream().map(JsonNode::asLong).forEach(t -> assertThat(t).isEqualTo(i.getAndIncrement())); + } + + @ParameterizedTest + @MethodSource("dbs") + void mapFilterForEachStream(ArangoDatabase db) { + final AtomicLong i = new AtomicLong(0L); + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + cursor.stream().map(JsonNode::asLong).filter(t -> t < 50).forEach(t -> assertThat(t).isEqualTo(i.getAndIncrement())); + } + + @ParameterizedTest + @MethodSource("dbs") + void anyMatchStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final boolean match = cursor.stream().anyMatch(t -> t.asLong() == 50L); + assertThat(match).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void noneMatchStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final boolean match = cursor.stream().noneMatch(t -> t.asLong() == 100L); + assertThat(match).isTrue(); + } + + 
@ParameterizedTest + @MethodSource("dbs") + void allMatchStream(ArangoDatabase db) { + final ArangoCursor cursor = db.query("FOR i IN 0..99 RETURN i", JsonNode.class); + final boolean match = cursor.stream().allMatch(t -> t.asLong() < 100L); + assertThat(match).isTrue(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java new file mode 100644 index 000000000..47ac152e8 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java @@ -0,0 +1,717 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.internal.ArangoRequestParam; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; +import com.arangodb.model.LogOptions.SortOrder; +import com.arangodb.util.RawJson; +import com.arangodb.util.SlowTest; +import com.arangodb.util.UnicodeUtils; +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Reşat SABIQ + * @author Michele Rastelli + */ +class ArangoDBAsyncTest extends BaseJunit5 { + + private static final String DB1 = "ArangoDBTest_db1"; + private static final String DB2 = "ArangoDBTest_db2"; + + private static final String ROOT = "root"; + private static final String PW = "machts der hund"; + + @BeforeAll + static void initDBs() { + initDB(DB1); + initDB(DB2); + } + + @AfterAll + static void shutdown() { + dropDB(DB1); + dropDB(DB2); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getVersion(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final ArangoDBVersion version = arangoDB.getVersion().get(); + assertThat(version.getServer()).isNotNull(); + assertThat(version.getVersion()).isNotNull(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncArangos") 
+ void createAndDeleteDatabase(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final String dbName = rndDbName(); + final Boolean resultCreate = arangoDB.createDatabase(dbName).get(); + assertThat(resultCreate).isTrue(); + final Boolean resultDelete = arangoDB.db(dbName).drop().get(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncArangos") + void createWithNotNormalizedName(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(supportsExtendedDbNames()); + + final String dbName = "testDB-\u006E\u0303\u00f1"; + String normalized = UnicodeUtils.normalize(dbName); + arangoDB.createDatabase(normalized).get(); + arangoDB.db(normalized).drop().get(); + + Throwable thrown = catchThrowable(() -> arangoDB.createDatabase(dbName).get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized"); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncArangos") + void createDatabaseWithOptions(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 6)); + final String dbName = rndDbName(); + final Boolean resultCreate = arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .options(new DatabaseOptions() + .writeConcern(2) + .replicationFactor(2) + .sharding("") + ) + ).get(); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo().get(); + assertThat(info.getReplicationFactor().get()).isEqualTo(2); + assertThat(info.getWriteConcern()).isEqualTo(2); + assertThat(info.getSharding()).isEmpty(); + + final Boolean resultDelete = arangoDB.db(dbName).drop().get(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncArangos") + void createDatabaseWithOptionsSatellite(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + 
assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 6)); + + final String dbName = rndDbName(); + final Boolean resultCreate = arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .options(new DatabaseOptions() + .writeConcern(2) + .replicationFactor(ReplicationFactor.ofSatellite()) + .sharding("") + ) + ).get(); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo().get(); + assertThat(info.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(info.getWriteConcern()).isEqualTo(2); + assertThat(info.getSharding()).isEmpty(); + + final Boolean resultDelete = arangoDB.db(dbName).drop().get(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncArangos") + void createDatabaseWithUsers(ArangoDBAsync arangoDB) throws InterruptedException, ExecutionException { + final String dbName = rndDbName(); + final Map extra = Collections.singletonMap("key", "value"); + final Boolean resultCreate = arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .users(Collections.singletonList(new DatabaseUsersOptions() + .active(true) + .username("testUser") + .passwd("testPasswd") + .extra(extra) + )) + ).get(); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo().get(); + assertThat(info.getName()).isEqualTo(dbName); + + Optional retrievedUserOptional = arangoDB.getUsers().get().stream() + .filter(it -> it.getUser().equals("testUser")) + .findFirst(); + assertThat(retrievedUserOptional).isPresent(); + + UserEntity retrievedUser = retrievedUserOptional.get(); + assertThat(retrievedUser.getActive()).isTrue(); + assertThat(retrievedUser.getExtra()).isEqualTo(extra); + + // needed for active-failover tests only + Thread.sleep(2_000); + + ArangoDBAsync arangoDBTestUser = new ArangoDB.Builder() + .loadProperties(config) + .user("testUser") + .password("testPasswd") + .build() + .async(); + 
+ // check if testUser has been created and can access the created db + ArangoCollectionAsync collection = arangoDBTestUser.db(dbName).collection("col-" + UUID.randomUUID()); + collection.create().get(); + arangoDBTestUser.shutdown(); + + final Boolean resultDelete = arangoDB.db(dbName).drop().get(); + assertThat(resultDelete).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getDatabases(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + Collection dbs = arangoDB.getDatabases().get(); + assertThat(dbs).contains("_system", DB1); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getAccessibleDatabases(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final Collection dbs = arangoDB.getAccessibleDatabases().get(); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getAccessibleDatabasesFor(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final Collection dbs = arangoDB.getAccessibleDatabasesFor("root").get(); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void createUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + String username = "user-" + UUID.randomUUID(); + final UserEntity result = arangoDB.createUser(username, PW, null).get(); + try { + assertThat(result.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void deleteUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW, null).get(); + arangoDB.deleteUser(username).get(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getUserRoot(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + 
final UserEntity user = arangoDB.getUser(ROOT).get(); + assertThat(user.getUser()).isEqualTo(ROOT); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW, null).get(); + final UserEntity user = arangoDB.getUser(username).get(); + try { + assertThat(user.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getUsersOnlyRoot(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final Collection users = arangoDB.getUsers().get(); + assertThat(users).isNotEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getUsers(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + // Allow & account for pre-existing users other than ROOT: + final Collection initialUsers = arangoDB.getUsers().get(); + + arangoDB.createUser(username, PW, null).get(); + try { + final Collection users = arangoDB.getUsers().get(); + assertThat(users).hasSize(initialUsers.size() + 1); + + final List expected = new ArrayList<>(users.size()); + // Add initial users, including root: + for (final UserEntity userEntity : initialUsers) { + expected.add(userEntity.getUser()); + } + // Add username: + expected.add(username); + + for (final UserEntity user : users) { + assertThat(user.getUser()).isIn(expected); + } + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void updateUserNoOptions(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW, null).get(); + try { + arangoDB.updateUser(username, null).get(); + } finally { + 
arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void updateUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + final Map extra = new HashMap<>(); + extra.put("hund", false); + arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)).get(); + try { + extra.put("hund", true); + extra.put("mund", true); + final UserEntity user = arangoDB.updateUser(username, new UserUpdateOptions().extra(extra)).get(); + assertThat(user.getExtra()).hasSize(2); + assertThat(user.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username).get(); + assertThat(user2.getExtra()).hasSize(2); + assertThat(user2.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void replaceUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + final Map extra = new HashMap<>(); + extra.put("hund", false); + arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)).get(); + try { + extra.remove("hund"); + extra.put("mund", true); + final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)).get(); + assertThat(user.getExtra()).hasSize(1); + assertThat(user.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username).get(); + assertThat(user2.getExtra()).hasSize(1); + assertThat(user2.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + } finally { + 
arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void updateUserDefaultDatabaseAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW).get(); + try { + arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void updateUserDefaultCollectionAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW).get(); + try { + arangoDB.grantDefaultCollectionAccess(username, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(username).get(); + } + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticationFailPassword(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + final ArangoDBAsync arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .acquireHostList(false) + .password("no").jwt(null) + .build() + .async(); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(401); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticationFailUser(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + final ArangoDBAsync arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .acquireHostList(false) + .user("no").jwt(null) + .build() + .async(); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) 
thrown).getResponseCode()).isEqualTo(401); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void executeGetVersion(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + Request request = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.GET) + .path("/_api/version") + .queryParam("details", "true") + .build(); + final Response response = arangoDB.execute(request, RawJson.class).get(); + JsonNode body = SerdeUtils.INSTANCE.parseJson(response.getBody().get()); + assertThat(body.get("version").isTextual()).isTrue(); + assertThat(body.get("details").isObject()).isTrue(); + assertThat(response.getResponseCode()).isEqualTo(200); + if (isAtLeastVersion(3, 9)) { + String header = response.getHeaders().get("x-arango-queue-time-seconds"); + assertThat(header).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntries(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null).get(); + assertThat(logs.getTotal()).isPositive(); + assertThat(logs.getMessages()).hasSize(logs.getTotal().intValue()); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesUpto(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logsUpto = arangoDB.getLogEntries(new LogOptions().upto(LogLevel.WARNING)).get(); + assertThat(logsUpto.getMessages()) + .map(LogEntriesEntity.Message::getLevel) + .doesNotContain("INFO"); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesLevel(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logsInfo = arangoDB.getLogEntries(new LogOptions().level(LogLevel.INFO)).get(); + assertThat(logsInfo.getMessages()) + 
.map(LogEntriesEntity.Message::getLevel) + .containsOnly("INFO"); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesStart(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null).get(); + final Long firstId = logs.getMessages().get(0).getId(); + final LogEntriesEntity logsStart = arangoDB.getLogEntries(new LogOptions().start(firstId + 1)).get(); + assertThat(logsStart.getMessages()) + .map(LogEntriesEntity.Message::getId) + .doesNotContain(firstId); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesSize(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null).get(); + int count = logs.getMessages().size(); + assertThat(count).isPositive(); + final LogEntriesEntity logsSize = arangoDB.getLogEntries(new LogOptions().size(count - 1)).get(); + assertThat(logsSize.getMessages()).hasSize(count - 1); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesOffset(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null).get(); + assertThat(logs.getTotal()).isPositive(); + Long firstId = logs.getMessages().get(0).getId(); + final LogEntriesEntity logsOffset = arangoDB.getLogEntries(new LogOptions().offset(1)).get(); + assertThat(logsOffset.getMessages()) + .map(LogEntriesEntity.Message::getId) + .doesNotContain(firstId); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesSearch(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null).get(); + final LogEntriesEntity logsSearch = arangoDB.getLogEntries(new 
LogOptions().search(getTestDb())).get(); + assertThat(logs.getTotal()).isGreaterThan(logsSearch.getTotal()); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesSortAsc(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(new LogOptions().sort(SortOrder.asc)).get(); + long lastId = -1; + List ids = logs.getMessages().stream() + .map(LogEntriesEntity.Message::getId) + .collect(Collectors.toList()); + for (final Long id : ids) { + assertThat(id).isGreaterThan(lastId); + lastId = id; + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogEntriesSortDesc(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(new LogOptions().sort(SortOrder.desc)).get(); + long lastId = Long.MAX_VALUE; + List ids = logs.getMessages().stream() + .map(LogEntriesEntity.Message::getId) + .collect(Collectors.toList()); + for (final Long id : ids) { + assertThat(lastId).isGreaterThan(id); + lastId = id; + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getLogLevel(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); // it fails in 3.6 active-failover (BTS-362) + final LogLevelEntity logLevel = arangoDB.getLogLevel().get(); + assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void setLogLevel(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); // it fails in 3.6 active-failover (BTS-362) + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setAgency(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity).get(); + 
assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setAgency(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void setAllLogLevel(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 9)); + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setAll(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity).get(); + assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(logLevel.getQueries()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + LogLevelEntity retrievedLevels = arangoDB.getLogLevel().get(); + assertThat(retrievedLevels.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setAll(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void logLevelWithServerId(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isCluster()); + String serverId = arangoDB.getServerId().get(); + LogLevelOptions options = new LogLevelOptions().serverId(serverId); + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity, options).get(); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(arangoDB.getLogLevel(options).get().getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setGraphs(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void resetLogLevels(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + LogLevelOptions 
options = new LogLevelOptions(); + LogLevelEntity entity = new LogLevelEntity(); + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity err = arangoDB.setLogLevel(entity, options).get(); + assertThat(err.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity logLevel = arangoDB.resetLogLevels(options).get(); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void resetLogLevelsWithServerId(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + assumeTrue(isCluster()); + String serverId = arangoDB.getServerId().get(); + LogLevelOptions options = new LogLevelOptions().serverId(serverId); + + LogLevelEntity entity = new LogLevelEntity(); + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity err = arangoDB.setLogLevel(entity, options).get(); + assertThat(err.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity logLevel = arangoDB.resetLogLevels(options).get(); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void getQueryOptimizerRules(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + final Collection rules = arangoDB.getQueryOptimizerRules().get(); + assertThat(rules).isNotEmpty(); + for (QueryOptimizerRule rule : rules) { + assertThat(rule).isNotNull(); + assertThat(rule.getName()).isNotNull(); + QueryOptimizerRule.Flags flags = rule.getFlags(); + assertThat(flags.getHidden()).isNotNull(); + assertThat(flags.getClusterOnly()).isNotNull(); + assertThat(flags.getCanBeDisabled()).isNotNull(); + assertThat(flags.getCanCreateAdditionalPlans()).isNotNull(); + assertThat(flags.getDisabledByDefault()).isNotNull(); + assertThat(flags.getEnterpriseOnly()).isNotNull(); + } + } + + @ParameterizedTest + 
@MethodSource("asyncArangos") + void arangoDBException(ArangoDBAsync arangoDB) { + Throwable thrown = catchThrowable(() -> arangoDB.db("no").getInfo().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1228); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void accessMultipleDatabases(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final ArangoDBVersion version1 = arangoDB.db(DB1).getVersion().get(); + assertThat(version1).isNotNull(); + final ArangoDBVersion version2 = arangoDB.db(DB2).getVersion().get(); + assertThat(version2).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + @Disabled("Manual execution only") + void queueTime(ArangoDBAsync arangoDB) throws InterruptedException, ExecutionException { + List> futures = IntStream.range(0, 80) + .mapToObj(i -> arangoDB.db().query("RETURN SLEEP(1)", Void.class)) + .collect(Collectors.toList()); + for (CompletableFuture f : futures) { + f.get(); + } + + QueueTimeMetrics qt = arangoDB.metrics().getQueueTime(); + double avg = qt.getAvg(); + QueueTimeSample[] values = qt.getValues(); + if (isAtLeastVersion(3, 9)) { + assertThat(values).hasSize(20); + for (int i = 0; i < values.length; i++) { + assertThat(values[i].value).isNotNegative(); + if (i > 0) { + assertThat(values[i].timestamp).isGreaterThanOrEqualTo(values[i - 1].timestamp); + } + } + + if (avg < 0.0) { + System.err.println("avg < 0: " + avg); + System.err.println("got values:"); + for (QueueTimeSample v : values) { + System.err.println(v.value); + } + } + assertThat(avg).isNotNegative(); + } else { + assertThat(avg).isEqualTo(0.0); + assertThat(values).isEmpty(); + } + + } +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoDBTest.java b/test-functional/src/test/java/com/arangodb/ArangoDBTest.java new file mode 100644 
index 000000000..030d3d6a7 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoDBTest.java @@ -0,0 +1,769 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.*; +import com.arangodb.internal.ArangoRequestParam; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.model.*; +import com.arangodb.model.LogOptions.SortOrder; +import com.arangodb.util.RawJson; +import com.arangodb.util.SlowTest; +import com.arangodb.util.UnicodeUtils; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.assertj.core.api.Assertions.assertThat; +import static 
org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Reşat SABIQ + * @author Michele Rastelli + */ +class ArangoDBTest extends BaseJunit5 { + + private static final String DB1 = "ArangoDBTest_db1"; + private static final String DB2 = "ArangoDBTest_db2"; + + private static final String ROOT = "root"; + private static final String PW = "machts der hund"; + + @BeforeAll + static void initDBs() { + initDB(DB1); + initDB(DB2); + } + + @AfterAll + static void shutdown() { + dropDB(DB1); + dropDB(DB2); + } + + @ParameterizedTest + @MethodSource("arangos") + void getVersion(ArangoDB arangoDB) { + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version.getServer()).isNotNull(); + assertThat(version.getVersion()).isNotNull(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("arangos") + void createAndDeleteDatabase(ArangoDB arangoDB) { + final String dbName = rndDbName(); + final Boolean resultCreate; + resultCreate = arangoDB.createDatabase(dbName); + assertThat(resultCreate).isTrue(); + final Boolean resultDelete = arangoDB.db(dbName).drop(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("arangos") + void createWithNotNormalizedName(ArangoDB arangoDB) { + assumeTrue(supportsExtendedDbNames()); + + final String dbName = "testDB-\u006E\u0303\u00f1"; + String normalized = UnicodeUtils.normalize(dbName); + arangoDB.createDatabase(normalized); + arangoDB.db(normalized).drop(); + + Throwable thrown = catchThrowable(() -> arangoDB.createDatabase(dbName)); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized"); + } + + @SlowTest + @ParameterizedTest + @MethodSource("arangos") + void createDatabaseWithOptions(ArangoDB arangoDB) { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 6)); + final String dbName = rndDbName(); + final Boolean resultCreate = 
arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .options(new DatabaseOptions() + .writeConcern(2) + .replicationFactor(2) + .sharding("") + ) + ); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo(); + assertThat(info.getReplicationFactor().get()).isEqualTo(2); + assertThat(info.getWriteConcern()).isEqualTo(2); + assertThat(info.getSharding()).isEmpty(); + + final Boolean resultDelete = arangoDB.db(dbName).drop(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("arangos") + void createDatabaseWithOptionsSatellite(ArangoDB arangoDB) { + assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 6)); + + final String dbName = rndDbName(); + final Boolean resultCreate = arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .options(new DatabaseOptions() + .writeConcern(2) + .replicationFactor(ReplicationFactor.ofSatellite()) + .sharding("") + ) + ); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo(); + assertThat(info.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(info.getWriteConcern()).isEqualTo(2); + assertThat(info.getSharding()).isEmpty(); + + final Boolean resultDelete = arangoDB.db(dbName).drop(); + assertThat(resultDelete).isTrue(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("arangos") + void createDatabaseWithUsers(ArangoDB arangoDB) throws InterruptedException { + final String dbName = rndDbName(); + final Map extra = Collections.singletonMap("key", "value"); + final Boolean resultCreate = arangoDB.createDatabase(new DBCreateOptions() + .name(dbName) + .users(Collections.singletonList(new DatabaseUsersOptions() + .active(true) + .username("testUser") + .passwd("testPasswd") + .extra(extra) + )) + ); + assertThat(resultCreate).isTrue(); + + DatabaseEntity info = arangoDB.db(dbName).getInfo(); + 
assertThat(info.getName()).isEqualTo(dbName); + + Optional retrievedUserOptional = arangoDB.getUsers().stream() + .filter(it -> it.getUser().equals("testUser")) + .findFirst(); + assertThat(retrievedUserOptional).isPresent(); + + UserEntity retrievedUser = retrievedUserOptional.get(); + assertThat(retrievedUser.getActive()).isTrue(); + assertThat(retrievedUser.getExtra()).isEqualTo(extra); + + // needed for active-failover tests only + Thread.sleep(2_000); + + ArangoDB arangoDBTestUser = new ArangoDB.Builder() + .loadProperties(config) + .user("testUser") + .password("testPasswd") + .build(); + + // check if testUser has been created and can access the created db + ArangoCollection collection = arangoDBTestUser.db(dbName).collection("col-" + UUID.randomUUID()); + collection.create(); + arangoDBTestUser.shutdown(); + + final Boolean resultDelete = arangoDB.db(dbName).drop(); + assertThat(resultDelete).isTrue(); + } + + @ParameterizedTest + @MethodSource("arangos") + void getDatabases(ArangoDB arangoDB) { + Collection dbs = arangoDB.getDatabases(); + assertThat(dbs).contains("_system", DB1); + } + + @ParameterizedTest + @MethodSource("arangos") + void getAccessibleDatabases(ArangoDB arangoDB) { + final Collection dbs = arangoDB.getAccessibleDatabases(); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("arangos") + void getAccessibleDatabasesFor(ArangoDB arangoDB) { + final Collection dbs = arangoDB.getAccessibleDatabasesFor("root"); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("arangos") + void createUser(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + final UserEntity result = arangoDB.createUser(username, PW, null); + try { + assertThat(result.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void deleteUser(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + 
arangoDB.createUser(username, PW, null); + arangoDB.deleteUser(username); + } + + @ParameterizedTest + @MethodSource("arangos") + void getUserRoot(ArangoDB arangoDB) { + final UserEntity user = arangoDB.getUser(ROOT); + assertThat(user.getUser()).isEqualTo(ROOT); + } + + @ParameterizedTest + @MethodSource("arangos") + void getUser(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW, null); + final UserEntity user = arangoDB.getUser(username); + assertThat(user.getUser()).isEqualTo(username); + } + + @ParameterizedTest + @MethodSource("arangos") + void getUsersOnlyRoot(ArangoDB arangoDB) { + final Collection users = arangoDB.getUsers(); + assertThat(users).isNotEmpty(); + } + + @ParameterizedTest + @MethodSource("arangos") + void getUsers(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + // Allow & account for pre-existing users other than ROOT: + final Collection initialUsers = arangoDB.getUsers(); + + arangoDB.createUser(username, PW, null); + try { + final Collection users = arangoDB.getUsers(); + assertThat(users).hasSize(initialUsers.size() + 1); + + final List expected = new ArrayList<>(users.size()); + // Add initial users, including root: + for (final UserEntity userEntity : initialUsers) { + expected.add(userEntity.getUser()); + } + // Add username: + expected.add(username); + + for (final UserEntity user : users) { + assertThat(user.getUser()).isIn(expected); + } + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void updateUserNoOptions(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW, null); + try { + arangoDB.updateUser(username, null); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void updateUser(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + final Map extra = new HashMap<>(); + 
extra.put("hund", false); + arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)); + try { + extra.put("hund", true); + extra.put("mund", true); + final UserEntity user = arangoDB.updateUser(username, new UserUpdateOptions().extra(extra)); + assertThat(user.getExtra()).hasSize(2); + assertThat(user.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username); + assertThat(user2.getExtra()).hasSize(2); + assertThat(user2.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void replaceUser(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + final Map extra = new HashMap<>(); + extra.put("hund", false); + arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)); + try { + extra.remove("hund"); + extra.put("mund", true); + final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)); + assertThat(user.getExtra()).hasSize(1); + assertThat(user.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username); + assertThat(user2.getExtra()).hasSize(1); + assertThat(user2.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void updateUserDefaultDatabaseAccess(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW); + try { + arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + 
@MethodSource("arangos") + void updateUserDefaultCollectionAccess(ArangoDB arangoDB) { + String username = "user-" + UUID.randomUUID(); + arangoDB.createUser(username, PW); + try { + arangoDB.grantDefaultCollectionAccess(username, Permissions.RW); + } finally { + arangoDB.deleteUser(username); + } + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticationFailPassword(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .acquireHostList(false) + .password("no").jwt(null).build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(401); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticationFailUser(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .acquireHostList(false) + .user("no").jwt(null).build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(401); + } + + @ParameterizedTest + @MethodSource("arangos") + void executeGetVersion(ArangoDB arangoDB) { + Request request = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.GET) + .path("/_api/version") + .queryParam("details", "true") + .build(); + final Response response = arangoDB.execute(request, RawJson.class); + JsonNode body = SerdeUtils.INSTANCE.parseJson(response.getBody().get()); + assertThat(body.get("version").isTextual()).isTrue(); + assertThat(body.get("details").isObject()).isTrue(); + assertThat(response.getResponseCode()).isEqualTo(200); + if 
(isAtLeastVersion(3, 9)) { + String header = response.getHeaders().get("x-arango-queue-time-seconds"); + assertThat(header).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void executeJS(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 11)); + Request request = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.POST) + .path("/_admin/execute") + .body(JsonNodeFactory.instance.textNode("return 11;")) + .build(); + final Response response = arangoDB.execute(request, Integer.class); + assertThat(response.getBody()).isEqualTo(11); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntries(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null); + assertThat(logs.getTotal()).isPositive(); + assertThat(logs.getMessages()).hasSize(logs.getTotal().intValue()); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesUpto(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logsUpto = arangoDB.getLogEntries(new LogOptions().upto(LogLevel.WARNING)); + assertThat(logsUpto.getMessages()) + .map(LogEntriesEntity.Message::getLevel) + .doesNotContain("INFO"); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesLevel(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logsInfo = arangoDB.getLogEntries(new LogOptions().level(LogLevel.INFO)); + assertThat(logsInfo.getMessages()) + .map(LogEntriesEntity.Message::getLevel) + .containsOnly("INFO"); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesStart(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null); + final Long firstId = logs.getMessages().get(0).getId(); + final LogEntriesEntity logsStart = arangoDB.getLogEntries(new LogOptions().start(firstId + 1)); + assertThat(logsStart.getMessages()) + 
.map(LogEntriesEntity.Message::getId) + .doesNotContain(firstId); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesSize(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null); + int count = logs.getMessages().size(); + assertThat(count).isPositive(); + final LogEntriesEntity logsSize = arangoDB.getLogEntries(new LogOptions().size(count - 1)); + assertThat(logsSize.getMessages()).hasSize(count - 1); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesOffset(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null); + assertThat(logs.getTotal()).isPositive(); + Long firstId = logs.getMessages().get(0).getId(); + final LogEntriesEntity logsOffset = arangoDB.getLogEntries(new LogOptions().offset(1)); + assertThat(logsOffset.getMessages()) + .map(LogEntriesEntity.Message::getId) + .doesNotContain(firstId); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesSearch(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(null); + final LogEntriesEntity logsSearch = arangoDB.getLogEntries(new LogOptions().search(getTestDb())); + assertThat(logs.getTotal()).isGreaterThan(logsSearch.getTotal()); + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesSortAsc(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = arangoDB.getLogEntries(new LogOptions().sort(SortOrder.asc)); + long lastId = -1; + List ids = logs.getMessages().stream() + .map(LogEntriesEntity.Message::getId) + .collect(Collectors.toList()); + for (final Long id : ids) { + assertThat(id).isGreaterThan(lastId); + lastId = id; + } + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogEntriesSortDesc(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 8)); + final LogEntriesEntity logs = 
arangoDB.getLogEntries(new LogOptions().sort(SortOrder.desc)); + long lastId = Long.MAX_VALUE; + List ids = logs.getMessages().stream() + .map(LogEntriesEntity.Message::getId) + .collect(Collectors.toList()); + for (final Long id : ids) { + assertThat(lastId).isGreaterThan(id); + lastId = id; + } + } + + @ParameterizedTest + @MethodSource("arangos") + void getLogLevel(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 7)); // it fails in 3.6 active-failover (BTS-362) + final LogLevelEntity logLevel = arangoDB.getLogLevel(); + assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + @ParameterizedTest + @MethodSource("arangos") + void setLogLevel(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 7)); // it fails in 3.6 active-failover (BTS-362) + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setAgency(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity); + assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setAgency(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void setAllLogLevel(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 12)); + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setAll(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity); + assertThat(logLevel.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(logLevel.getQueries()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(logLevel.getRepWal()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(logLevel.getRepState()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + LogLevelEntity retrievedLevels = arangoDB.getLogLevel(); + assertThat(retrievedLevels.getAgency()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setAll(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity); + 
} + } + + @ParameterizedTest + @MethodSource("arangos") + void logLevelWithServerId(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isCluster()); + String serverId = arangoDB.getServerId(); + LogLevelOptions options = new LogLevelOptions().serverId(serverId); + final LogLevelEntity entity = new LogLevelEntity(); + try { + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + final LogLevelEntity logLevel = arangoDB.setLogLevel(entity, options); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + assertThat(arangoDB.getLogLevel(options).getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + } finally { + entity.setGraphs(LogLevelEntity.LogLevel.INFO); + arangoDB.setLogLevel(entity); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void resetLogLevels(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 12)); + LogLevelOptions options = new LogLevelOptions(); + LogLevelEntity entity = new LogLevelEntity(); + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity err = arangoDB.setLogLevel(entity, options); + assertThat(err.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity logLevel = arangoDB.resetLogLevels(options); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + @ParameterizedTest + @MethodSource("arangos") + void resetLogLevelsWithServerId(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 12)); + assumeTrue(isCluster()); + String serverId = arangoDB.getServerId(); + LogLevelOptions options = new LogLevelOptions().serverId(serverId); + + LogLevelEntity entity = new LogLevelEntity(); + entity.setGraphs(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity err = arangoDB.setLogLevel(entity, options); + assertThat(err.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.ERROR); + + LogLevelEntity logLevel = arangoDB.resetLogLevels(options); + assertThat(logLevel.getGraphs()).isEqualTo(LogLevelEntity.LogLevel.INFO); + } + + 
@ParameterizedTest + @MethodSource("arangos") + void getQueryOptimizerRules(ArangoDB arangoDB) { + assumeTrue(isAtLeastVersion(3, 10)); + final Collection rules = arangoDB.getQueryOptimizerRules(); + assertThat(rules).isNotEmpty(); + for (QueryOptimizerRule rule : rules) { + assertThat(rule).isNotNull(); + assertThat(rule.getName()).isNotNull(); + QueryOptimizerRule.Flags flags = rule.getFlags(); + assertThat(flags.getHidden()).isNotNull(); + assertThat(flags.getClusterOnly()).isNotNull(); + assertThat(flags.getCanBeDisabled()).isNotNull(); + assertThat(flags.getCanCreateAdditionalPlans()).isNotNull(); + assertThat(flags.getDisabledByDefault()).isNotNull(); + assertThat(flags.getEnterpriseOnly()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void arangoDBException(ArangoDB arangoDB) { + Throwable thrown = catchThrowable(() -> arangoDB.db("no").getInfo()); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1228); + } + + @ParameterizedTest + @MethodSource("arangos") + void loadproperties() { + Throwable thrown = catchThrowable(() -> new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig("arangodb-bad.properties")) + ); + assertThat(thrown).isInstanceOf(IllegalArgumentException.class); + } + + @ParameterizedTest + @MethodSource("arangos") + void accessMultipleDatabases(ArangoDB arangoDB) { + final ArangoDBVersion version1 = arangoDB.db(DB1).getVersion(); + assertThat(version1).isNotNull(); + final ArangoDBVersion version2 = arangoDB.db(DB2).getVersion(); + assertThat(version2).isNotNull(); + } + + @ParameterizedTest + @MethodSource("arangos") + @Disabled("Manual execution only") + void queueTime(ArangoDB arangoDB) throws InterruptedException, ExecutionException { + List> futures = IntStream.range(0, 80) + .mapToObj(i -> CompletableFuture.runAsync( + () -> arangoDB.db().query("RETURN 
SLEEP(1)", Void.class), + Executors.newFixedThreadPool(80)) + ) + .collect(Collectors.toList()); + for (CompletableFuture f : futures) { + f.get(); + } + + QueueTimeMetrics qt = arangoDB.metrics().getQueueTime(); + double avg = qt.getAvg(); + QueueTimeSample[] values = qt.getValues(); + if (isAtLeastVersion(3, 9)) { + assertThat(values).hasSize(20); + for (int i = 0; i < values.length; i++) { + assertThat(values[i].value).isNotNegative(); + if (i > 0) { + assertThat(values[i].timestamp).isGreaterThanOrEqualTo(values[i - 1].timestamp); + } + } + + if (avg < 0.0) { + System.err.println("avg < 0: " + avg); + System.err.println("got values:"); + for (QueueTimeSample v : values) { + System.err.println(v.value); + } + } + assertThat(avg).isNotNegative(); + } else { + assertThat(avg).isEqualTo(0.0); + assertThat(values).isEmpty(); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void asyncAndLaterResultRetrieval(ArangoDB arangoDB) throws InterruptedException { + Request request = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.POST) + .path("/_api/cursor") + .header("x-arango-async", "store") + .body(RawJson.of("{\"query\":\"RETURN SLEEP(0.1) || 5\"}")) + .build(); + + Response response = arangoDB.execute(request, Void.class); + String jobId = response.getHeaders().get("x-arango-async-id"); + + Request request2 = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.PUT) + .path("/_api/job/" + jobId) + .build(); + + Response response2 = arangoDB.execute(request2, ObjectNode.class); + while (response2.getResponseCode() == 204) { + Thread.sleep(50); + response2 = arangoDB.execute(request2, ObjectNode.class); + } + + assertThat(response2.getResponseCode()).isEqualTo(201); + assertThat(response2.getBody().get("result").get(0).numberValue()).isEqualTo(5); + } +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java 
b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java new file mode 100644 index 000000000..67af254ea --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java @@ -0,0 +1,1719 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.model.*; +import com.arangodb.util.*; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.assertj.core.api.InstanceOfAssertFactories.*; +import static org.assertj.core.api.InstanceOfAssertFactories.DOUBLE; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * 
@author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoDatabaseAsyncTest extends BaseJunit5 { + + private static final String CNAME1 = "ArangoDatabaseTest_collection_1"; + private static final String CNAME2 = "ArangoDatabaseTest_collection_2"; + private static final String ENAMES = "ArangoDatabaseTest_edge_collection"; + + @BeforeAll + static void init() { + BaseJunit5.initDB(); + BaseJunit5.initCollections(CNAME1, CNAME2); + BaseJunit5.initEdgeCollections(ENAMES); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getVersion(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoDBVersion version = db.getVersion().get(); + assertThat(version).isNotNull(); + assertThat(version.getServer()).isNotNull(); + assertThat(version.getVersion()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getEngine(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoDBEngine engine = db.getEngine().get(); + assertThat(engine).isNotNull(); + assertThat(engine.getName()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void exists(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assertThat(arangoDB.db(getTestDb()).exists().get()).isTrue(); + assertThat(arangoDB.db("no").exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getAccessibleDatabases(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection dbs = db.getAccessibleDatabases().get(); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = rndName(); + final CollectionEntity result = db.createCollection(name, null).get(); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + } + + @ParameterizedTest + 
@MethodSource("asyncDbs") + void createCollectionWithNotNormalizedName(ArangoDatabaseAsync db) { + assumeTrue(supportsExtendedNames()); + final String colName = "testCol-\u006E\u0303\u00f1"; + + Throwable thrown = catchThrowable(() -> db.createCollection(colName).get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized") + .extracting(it -> ((ArangoDBException) it).getResponseCode()).isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithReplicationFactor(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().replicationFactor(2)).get(); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithWriteConcern(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().replicationFactor(2).writeConcern(2)).get(); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getReplicationFactor().get()).isEqualTo(2); + assertThat(props.getWriteConcern()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createSatelliteCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db + 
.createCollection(name, new CollectionCreateOptions().replicationFactor(ReplicationFactor.ofSatellite())).get(); + + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithNumberOfShards(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().numberOfShards(2)).get(); + + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithShardingStrategys(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db.createCollection(name, new CollectionCreateOptions() + .shardingStrategy(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())).get(); + + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getShardingStrategy()).isEqualTo(ShardingStrategy.COMMUNITY_COMPAT.getInternalName()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithSmartJoinAttribute(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String fooName = rndName(); + db.collection(fooName).create().get(); + + String name = rndName(); + final 
CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().smartJoinAttribute("test123").distributeShardsLike(fooName).shardKeys("_key:")).get(); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + assertThat(db.collection(name).getProperties().get().getSmartJoinAttribute()).isEqualTo("test123"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithSmartJoinAttributeWrong(ArangoDatabaseAsync db) { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String name = rndName(); + + Throwable thrown = catchThrowable(() -> db.createCollection(name, new CollectionCreateOptions().smartJoinAttribute("test123")).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getErrorNum()).isEqualTo(4006); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithNumberOfShardsAndShardKey(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().numberOfShards(2).shardKeys("a")).get(); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + final CollectionPropertiesEntity properties = db.collection(name).getProperties().get(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + assertThat(properties.getShardKeys()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithNumberOfShardsAndShardKeys(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().numberOfShards(2).shardKeys("a", "b")).get(); + 
assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + final CollectionPropertiesEntity properties = db.collection(name).getProperties().get(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + assertThat(properties.getShardKeys()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithDistributeShardsLike(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + final Integer numberOfShards = 3; + + String name1 = rndName(); + String name2 = rndName(); + db.createCollection(name1, new CollectionCreateOptions().numberOfShards(numberOfShards)).get(); + db.createCollection(name2, new CollectionCreateOptions().distributeShardsLike(name1)).get(); + + assertThat(db.collection(name1).getProperties().get().getNumberOfShards()).isEqualTo(numberOfShards); + assertThat(db.collection(name2).getProperties().get().getNumberOfShards()).isEqualTo(numberOfShards); + } + + private void createCollectionWithKeyType(ArangoDatabaseAsync db, KeyType keyType) throws ExecutionException, InterruptedException { + String name = rndName(); + db.createCollection(name, new CollectionCreateOptions().keyOptions( + false, + keyType, + null, + null + )).get(); + assertThat(db.collection(name).getProperties().get().getKeyOptions().getType()).isEqualTo(keyType); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithKeyTypeAutoincrement(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + createCollectionWithKeyType(db, KeyType.autoincrement); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithKeyTypePadded(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + createCollectionWithKeyType(db, KeyType.padded); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void 
createCollectionWithKeyTypeTraditional(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + createCollectionWithKeyType(db, KeyType.traditional); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithKeyTypeUuid(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + createCollectionWithKeyType(db, KeyType.uuid); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithJsonSchema(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + String name = rndName(); + String rule = ("{ " + + " \"properties\": {" + + " \"number\": {" + + " \"type\": \"number\"" + + " }" + + " }" + + " }") + .replaceAll("\\s", ""); + String message = "The document has problems!"; + + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions() + .schema( + new CollectionSchema() + .setLevel(CollectionSchema.Level.NEW) + .setMessage(message) + .setRule(rule) + ) + ).get(); + assertThat(result.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(result.getSchema().getRule()).isEqualTo(rule); + assertThat(result.getSchema().getMessage()).isEqualTo(message); + + CollectionPropertiesEntity props = db.collection(name).getProperties().get(); + assertThat(props.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(props.getSchema().getRule()).isEqualTo(rule); + assertThat(props.getSchema().getMessage()).isEqualTo(message); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("number", 33); + db.collection(name).insertDocument(doc).get(); + + BaseDocument wrongDoc = new BaseDocument(UUID.randomUUID().toString()); + wrongDoc.addAttribute("number", "notANumber"); + Throwable thrown = catchThrowable(() -> db.collection(name).insertDocument(wrongDoc).get()).getCause(); + 
assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + + assertThat(e).hasMessageContaining(message); + assertThat(e.getResponseCode()).isEqualTo(400); + assertThat(e.getErrorNum()).isEqualTo(1620); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCollectionWithComputedFields(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String cName = rndName(); + ComputedValue cv = new ComputedValue() + .name("foo") + .expression("RETURN 11") + .overwrite(false) + .computeOn(ComputedValue.ComputeOn.insert) + .keepNull(false) + .failOnWarning(true); + + final CollectionEntity result = db.createCollection(cName, new CollectionCreateOptions().computedValues(cv)).get(); + + assertThat(result).isNotNull(); + assertThat(result.getComputedValues()) + .hasSize(1) + .contains(cv); + + ComputedValue cv2 = new ComputedValue() + .name("bar") + .expression("RETURN 22") + .overwrite(true) + .computeOn(ComputedValue.ComputeOn.update, ComputedValue.ComputeOn.replace) + .keepNull(true) + .failOnWarning(false); + + db.collection(cName).changeProperties(new CollectionPropertiesOptions().computedValues(cv2)).get(); + + CollectionPropertiesEntity props = db.collection(cName).getProperties().get(); + assertThat(props.getComputedValues()) + .hasSize(1) + .contains(cv2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = rndName(); + db.createCollection(name, null).get(); + db.collection(name).drop().get(); + Throwable thrown = catchThrowable(() -> db.collection(name).getInfo().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteSystemCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String name = 
"_system_test"; + db.createCollection(name, new CollectionCreateOptions().isSystem(true)).get(); + db.collection(name).drop(true).get(); + Throwable thrown = catchThrowable(() -> db.collection(name).getInfo().get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(it -> ((ArangoDBException) it).getResponseCode()) + .isEqualTo(404); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteSystemCollectionFail(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String name = "_system_test"; + ArangoCollectionAsync collection = db.collection(name); + if (collection.exists().get()) + collection.drop(true).get(); + + db.createCollection(name, new CollectionCreateOptions().isSystem(true)).get(); + Throwable thrown = catchThrowable(() -> collection.drop().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(403); + collection.drop(true).get(); + assertThat(collection.exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getIndex(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection fields = Collections.singletonList("field-" + rnd()); + final IndexEntity createResult = db.collection(CNAME1).ensurePersistentIndex(fields, null).get(); + final IndexEntity readResult = db.getIndex(createResult.getId()).get(); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteIndex(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection fields = Collections.singletonList("field-" + rnd()); + final IndexEntity createResult = db.collection(CNAME1).ensurePersistentIndex(fields, null).get(); + final String id = 
db.deleteIndex(createResult.getId()).get(); + assertThat(id).isEqualTo(createResult.getId()); + Throwable thrown = catchThrowable(() -> db.getIndex(id).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getCollections(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection collections = db.getCollections(null).get(); + long count = collections.stream().map(CollectionEntity::getName).filter(it -> it.equals(CNAME1)).count(); + assertThat(count).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getCollectionsExcludeSystem(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final CollectionsReadOptions options = new CollectionsReadOptions().excludeSystem(true); + final Collection nonSystemCollections = db.getCollections(options).get(); + final Collection allCollections = db.getCollections(null).get(); + assertThat(allCollections).hasSizeGreaterThan(nonSystemCollections.size()); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void grantAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void grantAccessRW(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void grantAccessRO(ArangoDBAsync arangoDB) throws 
ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void grantAccessNONE(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void grantAccessUserNotFound(ArangoDatabaseAsync db) { + String user = "user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.grantAccess(user, Permissions.RW).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void revokeAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).revokeAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void revokeAccessUserNotFound(ArangoDatabaseAsync db) { + String user = "user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.revokeAccess(user).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void resetAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).resetAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void 
resetAccessUserNotFound(ArangoDatabaseAsync db) { + String user = "user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.resetAccess(user).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void grantDefaultCollectionAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234").get(); + try { + arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(user).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getPermissions(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assertThat(db.getPermissions("root").get()).isEqualTo(Permissions.RW); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void query(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ArangoCursorAsync cursor = db.query("for i in 0..9 return i", Integer.class).get(); + List res = cursor.getResult(); + assertThat(res).hasSize(10); + for (int i = 0; i < 10; i++) { + assertThat(res.get(i)).isEqualTo(i); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithNullBindVar(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = db.query("return @foo", Object.class, Collections.singletonMap("foo", null)).get(); + assertThat(cursor.getResult()).containsExactly((Object) null); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryForEach(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null).get(); + } + final ArangoCursorAsync cursor = db.query("for i in " + CNAME1 + " return i._id", String.class).get(); + assertThat(cursor.getResult()).hasSizeGreaterThanOrEqualTo(10); + } + + 
@ParameterizedTest + @MethodSource("asyncDbs") + void queryWithCount(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null).get(); + } + + final ArangoCursorAsync cursor = db + .query("for i in " + CNAME1 + " Limit 6 return i._id", String.class, new AqlQueryOptions().count(true)).get(); + assertThat(cursor.getCount()).isEqualTo(6); + assertThat(cursor.getResult()).hasSize(6); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithLimitAndFullCount(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null).get(); + } + + final ArangoCursorAsync cursor = db + .query("for i in " + CNAME1 + " Limit 5 return i._id", String.class, new AqlQueryOptions().fullCount(true)).get(); + assertThat(cursor.getResult()).hasSize(5); + assertThat(cursor.getExtra().getStats()).isNotNull(); + assertThat(cursor.getExtra().getStats().getExecutionTime()).isPositive(); + assertThat((cursor.getExtra().getStats().getFullCount())).isGreaterThanOrEqualTo(10); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryStats(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null).get(); + } + + final ArangoCursorAsync cursor = db.query("for i in " + CNAME1 + " return i", Object.class).get(); + assertThat(cursor.getResult()).hasSizeGreaterThanOrEqualTo(10); + assertThat(cursor.getExtra().getStats()).isNotNull(); + assertThat(cursor.getExtra().getStats().getWritesExecuted()).isNotNull(); + assertThat(cursor.getExtra().getStats().getWritesIgnored()).isNotNull(); + assertThat(cursor.getExtra().getStats().getScannedFull()).isNotNull(); + assertThat(cursor.getExtra().getStats().getScannedIndex()).isNotNull(); + 
assertThat(cursor.getExtra().getStats().getFiltered()).isNotNull(); + assertThat(cursor.getExtra().getStats().getExecutionTime()).isNotNull(); + assertThat(cursor.getExtra().getStats().getPeakMemoryUsage()).isNotNull(); + assertThat(cursor.getExtra().getStats().getIntermediateCommits()).isNotNull(); + if (isAtLeastVersion(3, 12)) { + assertThat(cursor.getExtra().getStats().getDocumentLookups()).isNotNull(); + assertThat(cursor.getExtra().getStats().getSeeks()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithBatchSize(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = db + .query("for i in 1..10 return i", Integer.class, new AqlQueryOptions().batchSize(5)).get(); + + assertThat(cursor.getResult()).hasSize(5); + assertThat(cursor.hasMore()).isTrue(); + + ArangoCursorAsync c2 = cursor.nextBatch().get(); + assertThat(c2.getResult()).hasSize(5); + assertThat(c2.hasMore()).isFalse(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithTTL(ArangoDatabaseAsync db) throws InterruptedException, ExecutionException { + final ArangoCursorAsync cursor = db + .query("for i in 1..10 return i", Integer.class, new AqlQueryOptions().batchSize(5).ttl(1)).get(); + assertThat(cursor.getResult()).hasSize(5); + assertThat(cursor.hasMore()).isTrue(); + Thread.sleep(1_000); + Throwable thrown = catchThrowable(() -> cursor.nextBatch().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException ex = (ArangoDBException) thrown; + assertThat(ex.getMessage()).isEqualTo("Response: 404, Error: 1600 - cursor not found"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryRawBytes(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + InternalSerde serde = db.getSerde(); + RawBytes doc = RawBytes.of(serde.serialize(Collections.singletonMap("value", 1))); + RawBytes res = db.query("RETURN @doc", 
RawBytes.class, Collections.singletonMap("doc", doc)).get() + .getResult().get(0); + JsonNode data = serde.deserialize(res.get(), JsonNode.class); + assertThat(data.isObject()).isTrue(); + assertThat(data.get("value").isNumber()).isTrue(); + assertThat(data.get("value").numberValue()).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void changeQueryCache(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + QueryCachePropertiesEntity properties = db.getQueryCacheProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getMode()).isEqualTo(CacheMode.off); + assertThat(properties.getMaxResults()).isPositive(); + + properties.setMode(CacheMode.on); + properties = db.setQueryCacheProperties(properties).get(); + assertThat(properties).isNotNull(); + assertThat(properties.getMode()).isEqualTo(CacheMode.on); + + properties = db.getQueryCacheProperties().get(); + assertThat(properties.getMode()).isEqualTo(CacheMode.on); + + final QueryCachePropertiesEntity properties2 = new QueryCachePropertiesEntity(); + properties2.setMode(CacheMode.off); + db.setQueryCacheProperties(properties2).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithCache(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null).get(); + } + + final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); + properties.setMode(CacheMode.on); + db.setQueryCacheProperties(properties).get(); + + final ArangoCursorAsync cursor = db + .query("FOR t IN " + CNAME1 + " FILTER t.age >= 10 SORT t.age RETURN t._id", String.class, + new AqlQueryOptions().cache(true)).get(); + + assertThat((Object) cursor).isNotNull(); + assertThat(cursor.isCached()).isFalse(); + + final ArangoCursorAsync cachedCursor = db + .query("FOR t IN " + CNAME1 + " FILTER t.age >= 10 SORT 
t.age RETURN t._id", String.class, + new AqlQueryOptions().cache(true)).get(); + + assertThat((Object) cachedCursor).isNotNull(); + assertThat(cachedCursor.isCached()).isTrue(); + + final QueryCachePropertiesEntity properties2 = new QueryCachePropertiesEntity(); + properties2.setMode(CacheMode.off); + db.setQueryCacheProperties(properties2).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithMemoryLimit(ArangoDatabaseAsync db) { + Throwable thrown = catchThrowable(() -> db.query("RETURN 1..100000", String.class, + new AqlQueryOptions().memoryLimit(32 * 1024L)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getErrorNum()).isEqualTo(32); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithFailOnWarningTrue(ArangoDatabaseAsync db) { + Throwable thrown = catchThrowable(() -> db.query("RETURN 1 / 0", String.class, + new AqlQueryOptions().failOnWarning(true)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithFailOnWarningFalse(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions().failOnWarning(false)).get(); + assertThat(cursor.getResult()).containsExactly((String) null); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithTimeout(ArangoDatabaseAsync db) { + assumeTrue(isAtLeastVersion(3, 6)); + Throwable thrown = catchThrowable(() -> db.query("RETURN SLEEP(1)", String.class, + new AqlQueryOptions().maxRuntime(0.1)).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(410); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithMaxWarningCount(ArangoDatabaseAsync db) throws ExecutionException, 
InterruptedException { + final ArangoCursorAsync cursorWithWarnings = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions()).get(); + assertThat(cursorWithWarnings.getExtra().getWarnings()).hasSize(1); + final ArangoCursorAsync cursorWithLimitedWarnings = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions().maxWarningCount(0L)).get(); + final Collection warnings = cursorWithLimitedWarnings.getExtra().getWarnings(); + assertThat(warnings).isNullOrEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryCursor(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ArangoCursorAsync c1 = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1)).get(); + List result = new ArrayList<>(); + result.addAll(c1.getResult()); + ArangoCursorAsync c2 = c1.nextBatch().get(); + result.addAll(c2.getResult()); + ArangoCursorAsync c3 = db.cursor(c2.getId(), Integer.class).get(); + result.addAll(c3.getResult()); + ArangoCursorAsync c4 = c3.nextBatch().get(); + result.addAll(c4.getResult()); + assertThat(c4.hasMore()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryCursorInTx(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + StreamTransactionEntity tx = db.beginStreamTransaction(new StreamTransactionOptions()).get(); + ArangoCursorAsync c1 = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).streamTransactionId(tx.getId())).get(); + List result = new ArrayList<>(); + result.addAll(c1.getResult()); + ArangoCursorAsync c2 = c1.nextBatch().get(); + result.addAll(c2.getResult()); + ArangoCursorAsync c3 = db.cursor(c2.getId(), Integer.class, + new AqlQueryOptions().streamTransactionId(tx.getId())).get(); + result.addAll(c3.getResult()); + ArangoCursorAsync c4 = c3.nextBatch().get(); + result.addAll(c4.getResult()); + assertThat(c4.hasMore()).isFalse(); + 
assertThat(result).containsExactly(1, 2, 3, 4); + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryCursorRetry(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 11)); + ArangoCursorAsync c1 = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).allowRetry(true)).get(); + List result = new ArrayList<>(); + result.addAll(c1.getResult()); + ArangoCursorAsync c2 = c1.nextBatch().get(); + result.addAll(c2.getResult()); + ArangoCursorAsync c3 = db.cursor(c2.getId(), Integer.class, c2.getNextBatchId()).get(); + result.addAll(c3.getResult()); + ArangoCursorAsync c4 = c3.nextBatch().get(); + result.addAll(c4.getResult()); + c4.close(); + assertThat(c4.hasMore()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryCursorRetryInTx(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 11)); + StreamTransactionEntity tx = db.beginStreamTransaction(new StreamTransactionOptions()).get(); + ArangoCursorAsync c1 = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).allowRetry(true).streamTransactionId(tx.getId())).get(); + List result = new ArrayList<>(); + result.addAll(c1.getResult()); + ArangoCursorAsync c2 = c1.nextBatch().get(); + result.addAll(c2.getResult()); + ArangoCursorAsync c3 = db.cursor(c2.getId(), Integer.class, c2.getNextBatchId(), + new AqlQueryOptions().streamTransactionId(tx.getId())).get(); + result.addAll(c3.getResult()); + ArangoCursorAsync c4 = c3.nextBatch().get(); + result.addAll(c4.getResult()); + c4.close(); + assertThat(c4.hasMore()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void 
changeQueryTrackingProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + try { + QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getEnabled()).isTrue(); + assertThat(properties.getTrackSlowQueries()).isTrue(); + assertThat(properties.getMaxQueryStringLength()).isPositive(); + assertThat(properties.getMaxSlowQueries()).isPositive(); + assertThat(properties.getSlowQueryThreshold()).isPositive(); + properties.setEnabled(false); + properties = db.setQueryTrackingProperties(properties).get(); + assertThat(properties).isNotNull(); + assertThat(properties.getEnabled()).isFalse(); + properties = db.getQueryTrackingProperties().get(); + assertThat(properties.getEnabled()).isFalse(); + } finally { + final QueryTrackingPropertiesEntity properties = new QueryTrackingPropertiesEntity(); + properties.setEnabled(true); + db.setQueryTrackingProperties(properties).get(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithBindVars(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + for (int i = 0; i < 10; i++) { + final BaseDocument baseDocument = new BaseDocument(UUID.randomUUID().toString()); + baseDocument.addAttribute("age", 20 + i); + db.collection(CNAME1).insertDocument(baseDocument, null).get(); + } + final Map bindVars = new HashMap<>(); + bindVars.put("@coll", CNAME1); + bindVars.put("age", 25); + + final ArangoCursorAsync cursor = db + .query("FOR t IN @@coll FILTER t.age >= @age SORT t.age RETURN t._id", String.class, bindVars).get(); + + assertThat(cursor.getResult()).hasSizeGreaterThanOrEqualTo(5); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithRawBindVars(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Map bindVars = new HashMap<>(); + bindVars.put("foo", RawJson.of("\"fooValue\"")); + bindVars.put("bar", 
RawBytes.of(db.getSerde().serializeUserData(11))); + + final JsonNode res = db.query("RETURN {foo: @foo, bar: @bar}", JsonNode.class, bindVars).get() + .getResult().get(0); + + assertThat(res.get("foo").textValue()).isEqualTo("fooValue"); + assertThat(res.get("bar").intValue()).isEqualTo(11); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryWithWarning(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = arangoDB.db().query("return 1/0", String.class).get(); + assertThat(cursor.getExtra().getWarnings()) + .isNotNull() + .hasSize(1) + .allSatisfy(w -> assertThat(w.getMessage()).contains("division by zero")); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryStream(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = db + .query("FOR i IN 1..2 RETURN i", Void.class, new AqlQueryOptions().stream(true).count(true)).get(); + assertThat((Object) cursor).isNotNull(); + assertThat(cursor.getCount()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryForceOneShardAttributeValue(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + + String cname = "forceOneShardAttr-" + UUID.randomUUID(); + db.createCollection(cname, new CollectionCreateOptions() + .shardKeys("foo") + .numberOfShards(3)).get(); + ArangoCollectionAsync col = db.collection(cname); + BaseDocument doc = new BaseDocument(); + doc.addAttribute("foo", "bar"); + col.insertDocument(doc).get(); + + Iterator c1 = db + .query("FOR d IN @@c RETURN d", BaseDocument.class, Collections.singletonMap("@c", cname), + new AqlQueryOptions().forceOneShardAttributeValue("bar")).get().getResult().iterator(); + assertThat(c1.hasNext()).isTrue(); + assertThat(c1.next().getAttribute("foo")).isEqualTo("bar"); + + Iterator c2 = db + .query("FOR d 
IN @@c RETURN d", BaseDocument.class, Collections.singletonMap("@c", cname), + new AqlQueryOptions().forceOneShardAttributeValue("ooo")).get().getResult().iterator(); + assertThat(c2.hasNext()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryClose(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().batchSize(1)).get(); + cursor.close().get(); + assertThat(cursor.getResult()).hasSize(1); + Throwable thrown = catchThrowable(() -> cursor.nextBatch().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException ex = (ArangoDBException) thrown; + assertThat(ex.getResponseCode()).isEqualTo(404); + assertThat(ex.getMessage()).contains("cursor not found"); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryCloseShouldBeIdempotent(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + ArangoCursorAsync cursor = arangoDB.db().query("for i in 1..2 return i", Integer.class, + new AqlQueryOptions().batchSize(1)).get(); + cursor.close().get(); + cursor.close().get(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryCloseOnCursorWithoutId(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + ArangoCursorAsync cursor = arangoDB.db().query("return 1", Integer.class).get(); + cursor.close().get(); + cursor.close().get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryNoResults(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + db.query("FOR i IN @@col RETURN i", BaseDocument.class, new MapBuilder().put("@col", CNAME1).get()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryWithNullBindParam(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + db.query("FOR i IN @@col FILTER i.test == @test RETURN 
i", BaseDocument.class, + new MapBuilder().put("@col", CNAME1).put("test", null).get()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void queryAllowDirtyRead(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final ArangoCursorAsync cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", + BaseDocument.class, new MapBuilder().put("@col", CNAME1).put("test", null).get(), + new AqlQueryOptions().allowDirtyRead(true)).get(); + assertThat(cursor.isPotentialDirtyRead()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryAllowRetry(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)).get(); + assertThat(cursor.getResult()).containsExactly("1"); + assertThat(cursor.hasMore()).isTrue(); + cursor.nextBatch().get(); + cursor.nextBatch().get(); + + ArangoCursorAsync c2 = cursor.nextBatch().get(); + assertThat(c2.getResult()).containsExactly("2"); + assertThat(c2.hasMore()).isFalse(); + + cursor.close().get(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryAllowRetryClose(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)).get(); + assertThat(cursor.getResult()).containsExactly("1"); + assertThat(cursor.hasMore()).isTrue(); + ArangoCursorAsync c2 = cursor.nextBatch().get(); + assertThat(c2.getResult()).containsExactly("2"); + assertThat(c2.hasMore()).isFalse(); + c2.close().get(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryAllowRetryCloseBeforeLatestBatch(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException 
{ + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)).get(); + assertThat(cursor.getResult()).containsExactly("1"); + assertThat(cursor.hasMore()).isTrue(); + cursor.close().get(); + } + + @ParameterizedTest + @MethodSource("asyncArangos") + void queryAllowRetryCloseSingleBatch(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true)).get(); + assertThat(cursor.getResult()).containsExactly("1", "2"); + assertThat(cursor.hasMore()).isFalse(); + cursor.close().get(); + } + + private String getExplainQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ArangoCollectionAsync character = db.collection("got_characters"); + ArangoCollectionAsync actor = db.collection("got_actors"); + + if (!character.exists().get()) + character.create().get(); + + if (!actor.exists().get()) + actor.create().get(); + + return "FOR `character` IN `got_characters` " + + " FOR `actor` IN `got_actors` " + + " FILTER `actor`.`_id` == @myId" + + " FILTER `character`.`actor` == `actor`.`_id` " + + " FILTER `character`.`value` != 1/0 " + + " RETURN {`character`, `actor`}"; + } + + void checkExecutionPlan(AqlExecutionExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.getEstimatedNrItems()) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.getEstimatedCost()).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionCollection collection = 
plan.getCollections().iterator().next(); + assertThat(collection.getName()) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.getName()) + .isNotNull() + .isNotEmpty(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("asyncDbs") + void explainQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions()).get(); + assertThat(explain).isNotNull(); + + checkExecutionPlan(explain.getPlan()); + assertThat(explain.getPlans()).isNull(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("asyncDbs") + void explainQueryAllPlans(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions().allPlans(true)).get(); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + 
assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + void checkUntypedExecutionPlan(AqlQueryExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.get("estimatedNrItems")) + .isInstanceOf(Integer.class) + .asInstanceOf(INTEGER) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.get("estimatedCost")).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void explainAqlQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions()).get(); + assertThat(explain).isNotNull(); + + checkUntypedExecutionPlan(explain.getPlan()); + assertThat(explain.getPlans()).isNull(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + 
assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void explainAqlQueryAllPlans(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().allPlans(true)).get(); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void explainAqlQueryAllPlansCustomOption(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().customOption("allPlans", true)).get(); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + 
assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void parseQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final AqlParseEntity parse = db.parseQuery("for i in 1..1 return i").get(); + assertThat(parse).isNotNull(); + assertThat(parse.getBindVars()).isEmpty(); + assertThat(parse.getCollections()).isEmpty(); + assertThat(parse.getAst()).hasSize(1); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncDbs") + void getCurrentlyRunningQueries(ArangoDatabaseAsync db) throws InterruptedException, ExecutionException { + String query = "return sleep(1)"; + CompletableFuture> q = db.query(query, Void.class); + Thread.sleep(300); + final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries().get(); + assertThat(currentlyRunningQueries).hasSize(1); + final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); + assertThat(queryEntity.getId()).isNotNull(); + assertThat(queryEntity.getDatabase()).isEqualTo(db.name()); + assertThat(queryEntity.getUser()).isEqualTo("root"); + assertThat(queryEntity.getQuery()).isEqualTo(query); + assertThat(queryEntity.getBindVars()).isEmpty(); + assertThat(queryEntity.getStarted()).isInThePast(); + assertThat(queryEntity.getRunTime()).isPositive(); + if (isAtLeastVersion(3, 11)) { + assertThat(queryEntity.getPeakMemoryUsage()).isNotNull(); + } + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.EXECUTING); + assertThat(queryEntity.getStream()).isFalse(); + q.get(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncDbs") + void killQuery(ArangoDatabaseAsync db) 
throws InterruptedException, ExecutionException { + CompletableFuture> c = db.query("return sleep(5)", Void.class); + Thread.sleep(500); + + Collection currentlyRunningQueries = db.getCurrentlyRunningQueries().get(); + assertThat(currentlyRunningQueries).hasSize(1); + QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.EXECUTING); + db.killQuery(queryEntity.getId()).get(); + + db.getCurrentlyRunningQueries().get().forEach(q -> + assertThat(q.getState()).isEqualTo(QueryExecutionState.KILLED) + ); + + Throwable thrown = catchThrowable(c::get).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(410); + assertThat(e.getErrorNum()).isEqualTo(1500); + assertThat(e.getErrorMessage()).contains("query killed"); + } + + @SlowTest + @ParameterizedTest + @MethodSource("asyncDbs") + void getAndClearSlowQueries(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + db.clearSlowQueries().get(); + + final QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties().get(); + final Long slowQueryThreshold = properties.getSlowQueryThreshold(); + properties.setSlowQueryThreshold(1L); + db.setQueryTrackingProperties(properties).get(); + + String query = "return sleep(1.1)"; + db.query(query, Void.class).get(); + final Collection slowQueries = db.getSlowQueries().get(); + assertThat(slowQueries).hasSize(1); + final QueryEntity queryEntity = slowQueries.iterator().next(); + assertThat(queryEntity.getId()).isNotNull(); + assertThat(queryEntity.getDatabase()).isEqualTo(db.name()); + assertThat(queryEntity.getUser()).isEqualTo("root"); + assertThat(queryEntity.getQuery()).isEqualTo(query); + assertThat(queryEntity.getBindVars()).isEmpty(); + assertThat(queryEntity.getStarted()).isInThePast(); + assertThat(queryEntity.getRunTime()).isPositive(); + if 
(isAtLeastVersion(3, 11)) { + assertThat(queryEntity.getPeakMemoryUsage()).isNotNull(); + } + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.FINISHED); + assertThat(queryEntity.getStream()).isFalse(); + + db.clearSlowQueries().get(); + assertThat(db.getSlowQueries().get()).isEmpty(); + properties.setSlowQueryThreshold(slowQueryThreshold); + db.setQueryTrackingProperties(properties).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGetDeleteAqlFunction(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null).get(); + assertThat(aqlFunctionsInitial).isEmpty(); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit", + "function (celsius) { return celsius * 1.8 + 32; }", null).get(); + + final Collection aqlFunctions = db.getAqlFunctions(null).get(); + assertThat(aqlFunctions).hasSizeGreaterThan(aqlFunctionsInitial.size()); + } finally { + final Integer deleteCount = db.deleteAqlFunction("myfunctions::temperature::celsiustofahrenheit", null).get(); + // compatibility with ArangoDB < 3.4 + if (isAtLeastVersion(3, 4)) { + assertThat(deleteCount).isEqualTo(1); + } else { + assertThat(deleteCount).isNull(); + } + final Collection aqlFunctions = db.getAqlFunctions(null).get(); + assertThat(aqlFunctions).hasSize(aqlFunctionsInitial.size()); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGetDeleteAqlFunctionWithNamespace(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null).get(); + assertThat(aqlFunctionsInitial).isEmpty(); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit1", + "function (celsius) { return celsius * 1.8 + 32; }", null).get(); + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit2", + "function (celsius) { return celsius * 1.8 + 32; }", null).get(); + + } 
finally { + final Integer deleteCount = db + .deleteAqlFunction("myfunctions::temperature", new AqlFunctionDeleteOptions().group(true)).get(); + // compatibility with ArangoDB < 3.4 + if (isAtLeastVersion(3, 4)) { + assertThat(deleteCount).isEqualTo(2); + } else { + assertThat(deleteCount).isNull(); + } + final Collection aqlFunctions = db.getAqlFunctions(null).get(); + assertThat(aqlFunctions).hasSize(aqlFunctionsInitial.size()); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = "graph-" + rnd(); + final GraphEntity result = db.createGraph(name, null, null).get(); + assertThat(result.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGraphSatellite(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 7)); + assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + + String name = "graph-" + rnd(); + final GraphEntity result = db.createGraph(name, null, new GraphCreateOptions().replicationFactor(ReplicationFactor.ofSatellite())).get(); + assertThat(result.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + GraphEntity info = db.graph(name).getInfo().get(); + assertThat(info.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + GraphEntity graph = db.getGraphs().get().stream().filter(g -> name.equals(g.getName())).findFirst().get(); + assertThat(graph.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGraphReplicationFaktor(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + String name = "graph-" + rnd(); + final String edgeCollection = rndName(); + final String fromCollection = rndName(); + final String toCollection = rndName(); + final Collection edgeDefinitions = + 
Collections.singletonList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db.createGraph(name, edgeDefinitions, new GraphCreateOptions().replicationFactor(2)).get(); + assertThat(result).isNotNull(); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties().get(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(2); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createGraphNumberOfShards(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + String name = "graph-" + rnd(); + final String edgeCollection = rndName(); + final String fromCollection = rndName(); + final String toCollection = rndName(); + final Collection edgeDefinitions = + Collections.singletonList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db + .createGraph(name, edgeDefinitions, new GraphCreateOptions().numberOfShards(2)).get(); + assertThat(result).isNotNull(); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties().get(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getGraphs(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = "graph-" + rnd(); + db.createGraph(name, null, null).get(); + final Collection graphs = db.getGraphs().get(); + assertThat(graphs).hasSizeGreaterThanOrEqualTo(1); + long count = graphs.stream().map(GraphEntity::getName).filter(name::equals).count(); + assertThat(count).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionString(ArangoDatabaseAsync db) 
throws ExecutionException, InterruptedException { + final TransactionOptions options = new TransactionOptions().params("test"); + final RawJson result = db.transaction("function (params) {return params;}", RawJson.class, options).get(); + assertThat(result.get()).isEqualTo("\"test\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionNumber(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final TransactionOptions options = new TransactionOptions().params(5); + final Integer result = db.transaction("function (params) {return params;}", Integer.class, options).get(); + assertThat(result).isEqualTo(5); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionJsonNode(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final TransactionOptions options = new TransactionOptions().params(JsonNodeFactory.instance.textNode("test")); + final JsonNode result = db.transaction("function (params) {return params;}", JsonNode.class, options).get(); + assertThat(result.isTextual()).isTrue(); + assertThat(result.asText()).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionJsonObject(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ObjectNode params = JsonNodeFactory.instance.objectNode().put("foo", "hello").put("bar", "world"); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", RawJson.class, + options).get(); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionJsonArray(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ArrayNode params = JsonNodeFactory.instance.arrayNode().add("hello").add("world"); + final TransactionOptions options = new TransactionOptions().params(params); + final 
RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options).get(); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionMap(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Map params = new MapBuilder().put("foo", "hello").put("bar", "world").get(); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", RawJson.class, + options).get(); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionArray(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String[] params = new String[]{"hello", "world"}; + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options).get(); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection params = new ArrayList<>(); + params.add("hello"); + params.add("world"); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options).get(); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionInsertJson(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String key = "key-" + rnd(); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"" + key + "\"}") + 
.writeCollections(CNAME1); + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." + CNAME1 + ".save(JSON.parse(params));" + + "}", Void.class, options).get(); + assertThat(db.collection(CNAME1).getDocument(key, RawJson.class).get()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionExclusiveWrite(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String key = "key-" + rnd(); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"" + key + "\"}") + .exclusiveCollections(CNAME1); + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." + CNAME1 + ".save(JSON.parse(params));" + + "}", Void.class, options).get(); + assertThat(db.collection(CNAME1).getDocument(key, RawJson.class).get()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionEmpty(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + db.transaction("function () {}", Void.class, null).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionAllowImplicit(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String action = "function (params) {" + "var db = require('internal').db;" + + "return {'a':db." + CNAME1 + ".all().toArray()[0], 'b':db." 
+ CNAME2 + ".all().toArray()[0]};" + + "}"; + final TransactionOptions options = new TransactionOptions().readCollections(CNAME1); + db.transaction(action, JsonNode.class, options).get(); + options.allowImplicit(false); + Throwable thrown = catchThrowable(() -> db.transaction(action, JsonNode.class, options).get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(it -> ((ArangoDBException) it).getResponseCode()) + .isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionPojoReturn(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String action = "function() { return {'value':'hello world'}; }"; + final TransactionTestEntity res = db.transaction(action, TransactionTestEntity.class, new TransactionOptions()).get(); + assertThat(res).isNotNull(); + assertThat(res.value).isEqualTo("hello world"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getInfo(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final DatabaseEntity info = db.getInfo().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(getTestDb()); + assertThat(info.getPath()).isNotNull(); + assertThat(info.getIsSystem()).isFalse(); + + if (isAtLeastVersion(3, 6) && isCluster()) { + assertThat(info.getSharding()).isNotNull(); + assertThat(info.getWriteConcern()).isNotNull(); + assertThat(info.getReplicationFactor()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void shouldIncludeExceptionMessage(ArangoDatabaseAsync db) { + assumeTrue(isAtLeastVersion(3, 4)); + + final String exceptionMessage = "My error context"; + final String action = "function (params) {" + "throw '" + exceptionMessage + "';" + "}"; + Throwable thrown = catchThrowable(() -> db.transaction(action, Void.class, null).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + 
assertThat(((ArangoDBException) thrown).getErrorMessage()).isEqualTo(exceptionMessage); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void reloadRouting(ArangoDatabaseAsync db) { + db.reloadRouting(); + } + + public static class TransactionTestEntity { + private String value; + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java new file mode 100644 index 000000000..3763aacbf --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java @@ -0,0 +1,1853 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.model.*; +import com.arangodb.util.*; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.InstanceOfAssertFactories.*; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoDatabaseTest extends BaseJunit5 { + + private static final String CNAME1 = "ArangoDatabaseTest_collection_1"; + private static final String CNAME2 = "ArangoDatabaseTest_collection_2"; + private static final String ENAMES = "ArangoDatabaseTest_edge_collection"; + + @BeforeAll + static void init() { + BaseJunit5.initDB(); + BaseJunit5.initCollections(CNAME1, CNAME2); + BaseJunit5.initEdgeCollections(ENAMES); + } + + @ParameterizedTest + @MethodSource("dbs") + void getVersion(ArangoDatabase db) { + final ArangoDBVersion version = db.getVersion(); + assertThat(version).isNotNull(); + assertThat(version.getServer()).isNotNull(); + 
assertThat(version.getVersion()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void getEngine(ArangoDatabase db) { + final ArangoDBEngine engine = db.getEngine(); + assertThat(engine).isNotNull(); + assertThat(engine.getName()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("arangos") + void exists(ArangoDB arangoDB) { + assertThat(arangoDB.db(getTestDb()).exists()).isTrue(); + assertThat(arangoDB.db("no").exists()).isFalse(); + } + + @ParameterizedTest + @MethodSource("dbs") + void getAccessibleDatabases(ArangoDatabase db) { + final Collection dbs = db.getAccessibleDatabases(); + assertThat(dbs).contains("_system"); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollection(ArangoDatabase db) { + String name = rndName(); + final CollectionEntity result = db.createCollection(name, null); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithNotNormalizedName(ArangoDatabase db) { + assumeTrue(supportsExtendedNames()); + final String colName = "testCol-\u006E\u0303\u00f1"; + + Throwable thrown = catchThrowable(() -> db.createCollection(colName)); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized") + .extracting(it -> ((ArangoDBException) it).getResponseCode()).isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithReplicationFactor(ArangoDatabase db) { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().replicationFactor(2)); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithWriteConcern(ArangoDatabase db) { + 
assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().replicationFactor(2).writeConcern(2)); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getReplicationFactor().get()).isEqualTo(2); + assertThat(props.getWriteConcern()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void createSatelliteCollection(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().replicationFactor(ReplicationFactor.ofSatellite())); + + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithNumberOfShards(ArangoDatabase db) { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().numberOfShards(2)); + + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithShardingStrategys(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db.createCollection(name, new CollectionCreateOptions() + .shardingStrategy(ShardingStrategy.COMMUNITY_COMPAT.getInternalName())); + + assertThat(result).isNotNull(); + 
assertThat(result.getId()).isNotNull(); + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getShardingStrategy()).isEqualTo(ShardingStrategy.COMMUNITY_COMPAT.getInternalName()); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithSmartJoinAttribute(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String fooName = rndName(); + db.collection(fooName).create(); + + String name = rndName(); + final CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().smartJoinAttribute("test123").distributeShardsLike(fooName).shardKeys("_key:")); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + assertThat(db.collection(name).getProperties().getSmartJoinAttribute()).isEqualTo("test123"); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithSmartJoinAttributeWrong(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + String name = rndName(); + + try { + db.createCollection(name, new CollectionCreateOptions().smartJoinAttribute("test123")); + } catch (ArangoDBException e) { + assertThat(e.getErrorNum()).isEqualTo(4006); + assertThat(e.getResponseCode()).isEqualTo(400); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithNumberOfShardsAndShardKey(ArangoDatabase db) { + assumeTrue(isCluster()); + + String name = rndName(); + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions().numberOfShards(2).shardKeys("a")); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + final CollectionPropertiesEntity properties = db.collection(name).getProperties(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + assertThat(properties.getShardKeys()).hasSize(1); + } + + @ParameterizedTest + @MethodSource("dbs") + void 
createCollectionWithNumberOfShardsAndShardKeys(ArangoDatabase db) { + assumeTrue(isCluster()); + String name = rndName(); + final CollectionEntity result = db.createCollection(name, + new CollectionCreateOptions().numberOfShards(2).shardKeys("a", "b")); + assertThat(result).isNotNull(); + assertThat(result.getId()).isNotNull(); + final CollectionPropertiesEntity properties = db.collection(name).getProperties(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + assertThat(properties.getShardKeys()).hasSize(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithDistributeShardsLike(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster()); + + final Integer numberOfShards = 3; + + String name1 = rndName(); + String name2 = rndName(); + db.createCollection(name1, new CollectionCreateOptions().numberOfShards(numberOfShards)); + db.createCollection(name2, new CollectionCreateOptions().distributeShardsLike(name1)); + + assertThat(db.collection(name1).getProperties().getNumberOfShards()).isEqualTo(numberOfShards); + assertThat(db.collection(name2).getProperties().getNumberOfShards()).isEqualTo(numberOfShards); + } + + private void createCollectionWithKeyType(ArangoDatabase db, KeyType keyType) { + String name = rndName(); + db.createCollection(name, new CollectionCreateOptions().keyOptions( + false, + keyType, + null, + null + )); + assertThat(db.collection(name).getProperties().getKeyOptions().getType()).isEqualTo(keyType); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithKeyTypeAutoincrement(ArangoDatabase db) { + assumeTrue(isSingleServer()); + createCollectionWithKeyType(db, KeyType.autoincrement); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithKeyTypePadded(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + createCollectionWithKeyType(db, KeyType.padded); + } + + @ParameterizedTest + @MethodSource("dbs") + void 
createCollectionWithKeyTypeTraditional(ArangoDatabase db) { + createCollectionWithKeyType(db, KeyType.traditional); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithKeyTypeUuid(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + createCollectionWithKeyType(db, KeyType.uuid); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithJsonSchema(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 7)); + String name = rndName(); + String rule = ("{ " + + " \"properties\": {" + + " \"number\": {" + + " \"type\": \"number\"" + + " }" + + " }" + + " }") + .replaceAll("\\s", ""); + String message = "The document has problems!"; + + final CollectionEntity result = db + .createCollection(name, new CollectionCreateOptions() + .schema( + new CollectionSchema() + .setLevel(CollectionSchema.Level.NEW) + .setMessage(message) + .setRule(rule) + ) + ); + assertThat(result.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(result.getSchema().getRule()).isEqualTo(rule); + assertThat(result.getSchema().getMessage()).isEqualTo(message); + + CollectionPropertiesEntity props = db.collection(name).getProperties(); + assertThat(props.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(props.getSchema().getRule()).isEqualTo(rule); + assertThat(props.getSchema().getMessage()).isEqualTo(message); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("number", 33); + db.collection(name).insertDocument(doc); + + BaseDocument wrongDoc = new BaseDocument(UUID.randomUUID().toString()); + wrongDoc.addAttribute("number", "notANumber"); + Throwable thrown = catchThrowable(() -> db.collection(name).insertDocument(wrongDoc)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + + assertThat(e).hasMessageContaining(message); + assertThat(e.getResponseCode()).isEqualTo(400); + 
assertThat(e.getErrorNum()).isEqualTo(1620); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCollectionWithComputedFields(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String cName = rndName(); + ComputedValue cv = new ComputedValue() + .name("foo") + .expression("RETURN 11") + .overwrite(false) + .computeOn(ComputedValue.ComputeOn.insert) + .keepNull(false) + .failOnWarning(true); + + final CollectionEntity result = db.createCollection(cName, new CollectionCreateOptions().computedValues(cv)); + + assertThat(result).isNotNull(); + assertThat(result.getComputedValues()) + .hasSize(1) + .contains(cv); + + ComputedValue cv2 = new ComputedValue() + .name("bar") + .expression("RETURN 22") + .overwrite(true) + .computeOn(ComputedValue.ComputeOn.update, ComputedValue.ComputeOn.replace) + .keepNull(true) + .failOnWarning(false); + + db.collection(cName).changeProperties(new CollectionPropertiesOptions().computedValues(cv2)); + + CollectionPropertiesEntity props = db.collection(cName).getProperties(); + assertThat(props.getComputedValues()) + .hasSize(1) + .contains(cv2); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteCollection(ArangoDatabase db) { + String name = rndName(); + db.createCollection(name, null); + db.collection(name).drop(); + Throwable thrown = catchThrowable(() -> db.collection(name).getInfo()); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteSystemCollection(ArangoDatabase db) { + final String name = "_system_test"; + db.createCollection(name, new CollectionCreateOptions().isSystem(true)); + db.collection(name).drop(true); + Throwable thrown = catchThrowable(() -> db.collection(name).getInfo()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(it -> ((ArangoDBException) it).getResponseCode()) + .isEqualTo(404); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteSystemCollectionFail(ArangoDatabase 
db) { + final String name = "_system_test"; + ArangoCollection collection = db.collection(name); + if (collection.exists()) + collection.drop(true); + + db.createCollection(name, new CollectionCreateOptions().isSystem(true)); + try { + collection.drop(); + fail(); + } catch (final ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(403); + } + collection.drop(true); + try { + collection.getInfo(); + fail(); + } catch (final ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(404); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void getIndex(ArangoDatabase db) { + final Collection fields = Collections.singletonList("field-" + rnd()); + final IndexEntity createResult = db.collection(CNAME1).ensurePersistentIndex(fields, null); + final IndexEntity readResult = db.getIndex(createResult.getId()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getType()).isEqualTo(createResult.getType()); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteIndex(ArangoDatabase db) { + final Collection fields = Collections.singletonList("field-" + rnd()); + final IndexEntity createResult = db.collection(CNAME1).ensurePersistentIndex(fields, null); + final String id = db.deleteIndex(createResult.getId()); + assertThat(id).isEqualTo(createResult.getId()); + try { + db.getIndex(id); + fail(); + } catch (final ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(404); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void getCollections(ArangoDatabase db) { + final Collection collections = db.getCollections(null); + long count = collections.stream().map(CollectionEntity::getName).filter(it -> it.equals(CNAME1)).count(); + assertThat(count).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("dbs") + void getCollectionsExcludeSystem(ArangoDatabase db) { + final CollectionsReadOptions options = new CollectionsReadOptions().excludeSystem(true); + final Collection 
nonSystemCollections = db.getCollections(options); + final Collection allCollections = db.getCollections(null); + assertThat(allCollections).hasSizeGreaterThan(nonSystemCollections.size()); + } + + @ParameterizedTest + @MethodSource("arangos") + void grantAccess(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).grantAccess(user); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void grantAccessRW(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void grantAccessRO(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("arangos") + void grantAccessNONE(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void grantAccessUserNotFound(ArangoDatabase db) { + String user = "user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.grantAccess(user, Permissions.RW)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("arangos") + void revokeAccess(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).revokeAccess(user); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void revokeAccessUserNotFound(ArangoDatabase db) { + String user = 
"user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.revokeAccess(user)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("arangos") + void resetAccess(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234", null); + try { + arangoDB.db(getTestDb()).resetAccess(user); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void resetAccessUserNotFound(ArangoDatabase db) { + String user = "user-" + rnd(); + Throwable thrown = catchThrowable(() -> db.resetAccess(user)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("arangos") + void grantDefaultCollectionAccess(ArangoDB arangoDB) { + String user = "user-" + rnd(); + arangoDB.createUser(user, "1234"); + try { + arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW); + } finally { + arangoDB.deleteUser(user); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void getPermissions(ArangoDatabase db) { + assertThat(db.getPermissions("root")).isEqualTo(Permissions.RW); + } + + @ParameterizedTest + @MethodSource("dbs") + void query(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + final ArangoCursor cursor = db.query("for i in " + CNAME1 + " return i._id", String.class); + assertThat((Object) cursor).isNotNull(); + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithNullBindVar(ArangoDatabase db) { + final ArangoCursor cursor = db.query("return @foo", Object.class, Collections.singletonMap("foo", null)); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryForEach(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + 
db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + final ArangoCursor cursor = db.query("for i in " + CNAME1 + " return i._id", String.class); + assertThat((Object) cursor).isNotNull(); + + int i = 0; + while (cursor.hasNext()) { + cursor.next(); + i++; + } + assertThat(i).isGreaterThanOrEqualTo(10); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithCount(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " Limit 6 return i._id", String.class, new AqlQueryOptions().count(true)); + assertThat((Object) cursor).isNotNull(); + for (int i = 1; i <= 6; i++, cursor.next()) { + assertThat(cursor.hasNext()).isTrue(); + } + assertThat(cursor.getCount()).isEqualTo(6); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithLimitAndFullCount(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " Limit 5 return i._id", String.class, new AqlQueryOptions().fullCount(true)); + assertThat((Object) cursor).isNotNull(); + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + assertThat(cursor.getStats()).isNotNull(); + assertThat(cursor.getStats().getExecutionTime()).isPositive(); + assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithLimitAndFullCountAsCustomOption(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " Limit 5 return i._id", String.class, new AqlQueryOptions() + .customOption("fullCount", true)); + assertThat((Object) cursor).isNotNull(); + for (int i = 0; i < 5; i++, cursor.next()) { + 
assertThat((Iterator) cursor).hasNext(); + } + assertThat(cursor.getStats()).isNotNull(); + assertThat(cursor.getStats().getExecutionTime()).isPositive(); + assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryStats(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db.query("for i in " + CNAME1 + " return i", Object.class); + assertThat((Object) cursor).isNotNull(); + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + assertThat(cursor.getStats()).isNotNull(); + assertThat(cursor.getStats().getWritesExecuted()).isNotNull(); + assertThat(cursor.getStats().getWritesIgnored()).isNotNull(); + assertThat(cursor.getStats().getScannedFull()).isNotNull(); + assertThat(cursor.getStats().getScannedIndex()).isNotNull(); + assertThat(cursor.getStats().getFiltered()).isNotNull(); + assertThat(cursor.getStats().getExecutionTime()).isNotNull(); + assertThat(cursor.getStats().getPeakMemoryUsage()).isNotNull(); + assertThat(cursor.getStats().getCursorsCreated()).isNotNull(); + assertThat(cursor.getStats().getCursorsRearmed()).isNotNull(); + assertThat(cursor.getStats().getCacheHits()).isNotNull(); + assertThat(cursor.getStats().getCacheMisses()).isNotNull(); + assertThat(cursor.getStats().getIntermediateCommits()).isNotNull(); + if (isAtLeastVersion(3, 12)) { + assertThat(cursor.getStats().getDocumentLookups()).isNotNull(); + assertThat(cursor.getStats().getSeeks()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithBatchSize(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " return i._id", String.class, new AqlQueryOptions().batchSize(5).count(true)); + + assertThat((Object) 
cursor).isNotNull(); + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryIterateWithBatchSize(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " return i._id", String.class, new AqlQueryOptions().batchSize(5).count(true)); + + assertThat((Object) cursor).isNotNull(); + final AtomicInteger i = new AtomicInteger(0); + for (; cursor.hasNext(); cursor.next()) { + i.incrementAndGet(); + } + assertThat(i.get()).isGreaterThanOrEqualTo(10); + } + + @SlowTest + @ParameterizedTest + @MethodSource("dbs") + void queryWithTTL(ArangoDatabase db) throws InterruptedException { + // set TTL to 1 seconds and get the second batch after 2 seconds! + final int ttl = 1; + final int wait = 2; + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " return i._id", String.class, new AqlQueryOptions().batchSize(5).ttl(ttl)); + + assertThat((Iterable) cursor).isNotNull(); + + try { + for (int i = 0; i < 10; i++, cursor.next()) { + assertThat(cursor.hasNext()).isTrue(); + if (i == 1) { + Thread.sleep(wait * 1000); + } + } + fail("this should fail"); + } catch (final ArangoDBException ex) { + assertThat(ex.getMessage()).isEqualTo("Response: 404, Error: 1600 - cursor not found"); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryRawBytes(ArangoDatabase db) { + InternalSerde serde = db.getSerde(); + RawBytes doc = RawBytes.of(serde.serialize(Collections.singletonMap("value", 1))); + RawBytes res = db.query("RETURN @doc", RawBytes.class, Collections.singletonMap("doc", doc)).next(); + JsonNode data = serde.deserialize(res.get(), JsonNode.class); + assertThat(data.isObject()).isTrue(); + 
assertThat(data.get("value").isNumber()).isTrue(); + assertThat(data.get("value").numberValue()).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserDataScalar(ArangoDatabase db) { + List docs = Arrays.asList("a", "b", "c"); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", String.class, + Collections.singletonMap("docs", docs), new AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).contains("a", "b", "c"); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserDataManaged(ArangoDatabase db) { + RawJson a = RawJson.of("\"foo\""); + RawJson b = RawJson.of("{\"key\":\"value\"}"); + RawJson c = RawJson.of("[1,null,true,\"bla\",{},[],\"\"]"); + RawJson docs = RawJson.of("[" + a.get() + "," + b.get() + "," + c.get() + "]"); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", RawJson.class, + Collections.singletonMap("docs", docs), new AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).containsExactly(a, b, c); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserData(ArangoDatabase db) { + Object a = "foo"; + Object b = Collections.singletonMap("key", "value"); + Object c = Arrays.asList(1, null, true, "bla", Collections.emptyMap(), Collections.emptyList(), ""); + List docs = Arrays.asList(a, b, c); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", Object.class, + Collections.singletonMap("docs", docs), new AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).containsExactly(a, b, c); + } + + @ParameterizedTest + @MethodSource("dbs") + void changeQueryCache(ArangoDatabase db) { + QueryCachePropertiesEntity properties = db.getQueryCacheProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getMode()).isEqualTo(CacheMode.off); + assertThat(properties.getMaxResults()).isPositive(); + + properties.setMode(CacheMode.on); + properties = db.setQueryCacheProperties(properties); + assertThat(properties).isNotNull(); + 
assertThat(properties.getMode()).isEqualTo(CacheMode.on); + + properties = db.getQueryCacheProperties(); + assertThat(properties.getMode()).isEqualTo(CacheMode.on); + + final QueryCachePropertiesEntity properties2 = new QueryCachePropertiesEntity(); + properties2.setMode(CacheMode.off); + db.setQueryCacheProperties(properties2); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithCache(ArangoDatabase db) { + assumeTrue(isSingleServer()); + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + } + + final QueryCachePropertiesEntity properties = new QueryCachePropertiesEntity(); + properties.setMode(CacheMode.on); + db.setQueryCacheProperties(properties); + + final ArangoCursor cursor = db + .query("FOR t IN " + CNAME1 + " FILTER t.age >= 10 SORT t.age RETURN t._id", String.class, + new AqlQueryOptions().cache(true)); + + assertThat((Object) cursor).isNotNull(); + assertThat(cursor.isCached()).isFalse(); + + final ArangoCursor cachedCursor = db + .query("FOR t IN " + CNAME1 + " FILTER t.age >= 10 SORT t.age RETURN t._id", String.class, + new AqlQueryOptions().cache(true)); + + assertThat((Object) cachedCursor).isNotNull(); + assertThat(cachedCursor.isCached()).isTrue(); + + final QueryCachePropertiesEntity properties2 = new QueryCachePropertiesEntity(); + properties2.setMode(CacheMode.off); + db.setQueryCacheProperties(properties2); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithMemoryLimit(ArangoDatabase db) { + Throwable thrown = catchThrowable(() -> db.query("RETURN 1..100000", String.class, + new AqlQueryOptions().memoryLimit(32 * 1024L))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getErrorNum()).isEqualTo(32); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithFailOnWarningTrue(ArangoDatabase db) { + Throwable thrown = catchThrowable(() -> db.query("RETURN 1 / 0", String.class, + new 
AqlQueryOptions().failOnWarning(true))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithFailOnWarningFalse(ArangoDatabase db) { + final ArangoCursor cursor = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions().failOnWarning(false)); + assertThat(cursor.next()).isNull(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("dbs") + void queryWithTimeout(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 6)); + Throwable thrown = catchThrowable(() -> db.query("RETURN SLEEP(1)", String.class, + new AqlQueryOptions().maxRuntime(0.1)).next()); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(((ArangoDBException) thrown).getResponseCode()).isEqualTo(410); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithMaxWarningCount(ArangoDatabase db) { + final ArangoCursor cursorWithWarnings = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions()); + assertThat(cursorWithWarnings.getWarnings()).hasSize(1); + final ArangoCursor cursorWithLimitedWarnings = db + .query("RETURN 1 / 0", String.class, new AqlQueryOptions().maxWarningCount(0L)); + final Collection warnings = cursorWithLimitedWarnings.getWarnings(); + assertThat(warnings).isNullOrEmpty(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryCursor(ArangoDatabase db) { + ArangoCursor cursor = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1)); + List result = new ArrayList<>(); + result.add(cursor.next()); + result.add(cursor.next()); + ArangoCursor cursor2 = db.cursor(cursor.getId(), Integer.class); + result.add(cursor2.next()); + result.add(cursor2.next()); + assertThat(cursor2.hasNext()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryCursorInTx(ArangoDatabase db) { + StreamTransactionEntity tx = db.beginStreamTransaction(new StreamTransactionOptions()); + 
ArangoCursor cursor = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).streamTransactionId(tx.getId())); + List result = new ArrayList<>(); + result.add(cursor.next()); + result.add(cursor.next()); + ArangoCursor cursor2 = db.cursor(cursor.getId(), Integer.class, + new AqlQueryOptions().streamTransactionId(tx.getId()) + ); + result.add(cursor2.next()); + result.add(cursor2.next()); + assertThat(cursor2.hasNext()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryCursorRetry(ArangoDatabase db) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + ArangoCursor cursor = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).allowRetry(true)); + List result = new ArrayList<>(); + result.add(cursor.next()); + result.add(cursor.next()); + ArangoCursor cursor2 = db.cursor(cursor.getId(), Integer.class, cursor.getNextBatchId()); + result.add(cursor2.next()); + result.add(cursor2.next()); + cursor2.close(); + assertThat(cursor2.hasNext()).isFalse(); + assertThat(result).containsExactly(1, 2, 3, 4); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryCursorRetryInTx(ArangoDatabase db) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + StreamTransactionEntity tx = db.beginStreamTransaction(new StreamTransactionOptions()); + ArangoCursor cursor = db.query("for i in 1..4 return i", Integer.class, + new AqlQueryOptions().batchSize(1).allowRetry(true).streamTransactionId(tx.getId())); + List result = new ArrayList<>(); + result.add(cursor.next()); + result.add(cursor.next()); + ArangoCursor cursor2 = db.cursor(cursor.getId(), Integer.class, cursor.getNextBatchId(), + new AqlQueryOptions().streamTransactionId(tx.getId()) + ); + result.add(cursor2.next()); + result.add(cursor2.next()); + cursor2.close(); + assertThat(cursor2.hasNext()).isFalse(); + 
assertThat(result).containsExactly(1, 2, 3, 4); + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void changeQueryTrackingProperties(ArangoDatabase db) { + try { + QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getEnabled()).isTrue(); + assertThat(properties.getTrackSlowQueries()).isTrue(); + assertThat(properties.getMaxQueryStringLength()).isPositive(); + assertThat(properties.getMaxSlowQueries()).isPositive(); + assertThat(properties.getSlowQueryThreshold()).isPositive(); + properties.setEnabled(false); + properties = db.setQueryTrackingProperties(properties); + assertThat(properties).isNotNull(); + assertThat(properties.getEnabled()).isFalse(); + properties = db.getQueryTrackingProperties(); + assertThat(properties.getEnabled()).isFalse(); + } finally { + final QueryTrackingPropertiesEntity properties = new QueryTrackingPropertiesEntity(); + properties.setEnabled(true); + db.setQueryTrackingProperties(properties); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithBindVars(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + final BaseDocument baseDocument = new BaseDocument(UUID.randomUUID().toString()); + baseDocument.addAttribute("age", 20 + i); + db.collection(CNAME1).insertDocument(baseDocument, null); + } + final Map bindVars = new HashMap<>(); + bindVars.put("@coll", CNAME1); + bindVars.put("age", 25); + + final ArangoCursor cursor = db + .query("FOR t IN @@coll FILTER t.age >= @age SORT t.age RETURN t._id", String.class, bindVars); + + assertThat((Object) cursor).isNotNull(); + + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithRawBindVars(ArangoDatabase db) { + final Map bindVars = new HashMap<>(); + bindVars.put("foo", RawJson.of("\"fooValue\"")); + bindVars.put("bar", 
RawBytes.of(db.getSerde().serializeUserData(11))); + + final JsonNode res = db.query("RETURN {foo: @foo, bar: @bar}", JsonNode.class, bindVars).next(); + + assertThat(res.get("foo").textValue()).isEqualTo("fooValue"); + assertThat(res.get("bar").intValue()).isEqualTo(11); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryWithWarning(ArangoDB arangoDB) { + final ArangoCursor cursor = arangoDB.db().query("return 1/0", String.class); + + assertThat((Object) cursor).isNotNull(); + assertThat(cursor.getWarnings()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryStream(ArangoDatabase db) { + final ArangoCursor cursor = db + .query("FOR i IN 1..2 RETURN i", Void.class, new AqlQueryOptions().stream(true).count(true)); + assertThat((Object) cursor).isNotNull(); + assertThat(cursor.getCount()).isNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryForceOneShardAttributeValue(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + + String cname = "forceOneShardAttr-" + UUID.randomUUID(); + db.createCollection(cname, new CollectionCreateOptions() + .shardKeys("foo") + .numberOfShards(3)); + ArangoCollection col = db.collection(cname); + BaseDocument doc = new BaseDocument(); + doc.addAttribute("foo", "bar"); + col.insertDocument(doc); + + ArangoCursor c1 = db + .query("FOR d IN @@c RETURN d", BaseDocument.class, Collections.singletonMap("@c", cname), + new AqlQueryOptions().forceOneShardAttributeValue("bar")); + assertThat(c1.hasNext()).isTrue(); + assertThat(c1.next().getAttribute("foo")).isEqualTo("bar"); + + ArangoCursor c2 = db + .query("FOR d IN @@c RETURN d", BaseDocument.class, Collections.singletonMap("@c", cname), + new AqlQueryOptions().forceOneShardAttributeValue("ooo")); + assertThat(c2.hasNext()).isFalse(); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryClose(ArangoDB arangoDB) throws IOException { + final ArangoCursor 
cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().batchSize(1)); + cursor.close(); + AtomicInteger count = new AtomicInteger(); + Throwable thrown = catchThrowable(() -> { + while (cursor.hasNext()) { + cursor.next(); + count.incrementAndGet(); + } + }); + + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(count).hasValue(1); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryCloseShouldBeIdempotent(ArangoDB arangoDB) throws IOException { + ArangoCursor cursor = arangoDB.db().query("for i in 1..2 return i", Integer.class, + new AqlQueryOptions().batchSize(1)); + cursor.close(); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryCloseOnCursorWithoutId(ArangoDB arangoDB) throws IOException { + ArangoCursor cursor = arangoDB.db().query("return 1", Integer.class); + cursor.close(); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryNoResults(ArangoDatabase db) throws IOException { + final ArangoCursor cursor = db + .query("FOR i IN @@col RETURN i", BaseDocument.class, new MapBuilder().put("@col", CNAME1).get()); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryWithNullBindParam(ArangoDatabase db) throws IOException { + final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", + BaseDocument.class, new MapBuilder().put("@col", CNAME1).put("test", null).get()); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryAllowDirtyRead(ArangoDatabase db) throws IOException { + final ArangoCursor cursor = db.query("FOR i IN @@col FILTER i.test == @test RETURN i", + BaseDocument.class, new MapBuilder().put("@col", CNAME1).put("test", null).get(), + new AqlQueryOptions().allowDirtyRead(true)); + if (isAtLeastVersion(3, 10)) { + assertThat(cursor.isPotentialDirtyRead()).isTrue(); + } + cursor.close(); + } + + @ParameterizedTest + @MethodSource("arangos") + 
void queryAllowRetry(ArangoDB arangoDB) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)); + assertThat(cursor.asListRemaining()).containsExactly("1", "2"); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryAllowRetryClose(ArangoDB arangoDB) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("1"); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("2"); + assertThat(cursor.hasNext()).isFalse(); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryAllowRetryCloseBeforeLatestBatch(ArangoDB arangoDB) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true).batchSize(1)); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("1"); + assertThat(cursor.hasNext()).isTrue(); + cursor.close(); + } + + @ParameterizedTest + @MethodSource("arangos") + void queryAllowRetryCloseSingleBatch(ArangoDB arangoDB) throws IOException { + assumeTrue(isAtLeastVersion(3, 11)); + final ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", String.class, new AqlQueryOptions().allowRetry(true)); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("1"); + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("2"); + assertThat(cursor.hasNext()).isFalse(); + cursor.close(); + } + + private String getExplainQuery(ArangoDatabase db) { + ArangoCollection character = db.collection("got_characters"); + ArangoCollection 
actor = db.collection("got_actors"); + + if (!character.exists()) + character.create(); + + if (!actor.exists()) + actor.create(); + + return "FOR `character` IN `got_characters` " + + " FOR `actor` IN `got_actors` " + + " FILTER `actor`.`_id` == @myId" + + " FILTER `character`.`actor` == `actor`.`_id` " + + " FILTER `character`.`value` != 1/0 " + + " RETURN {`character`, `actor`}"; + } + + void checkExecutionPlan(AqlExecutionExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.getEstimatedNrItems()) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.getEstimatedCost()).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.getName()) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.getName()) + .isNotNull() + .isNotEmpty(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("dbs") + void explainQuery(ArangoDatabase db) { + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions()); + assertThat(explain).isNotNull(); + + checkExecutionPlan(explain.getPlan()); + assertThat(explain.getPlans()).isNull(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + 
assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("dbs") + void explainQueryAllPlans(ArangoDatabase db) { + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions().allPlans(true)); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + void checkUntypedExecutionPlan(AqlQueryExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.get("estimatedNrItems")) + .isInstanceOf(Integer.class) + .asInstanceOf(INTEGER) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.get("estimatedCost")).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionVariable 
variable = plan.getVariables().iterator().next(); + assertThat(variable.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + } + + @ParameterizedTest + @MethodSource("dbs") + void explainAqlQuery(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions()); + assertThat(explain).isNotNull(); + + checkUntypedExecutionPlan(explain.getPlan()); + assertThat(explain.getPlans()).isNull(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @ParameterizedTest + @MethodSource("dbs") + void explainAqlQueryAllPlans(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().allPlans(true)); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + 
@ParameterizedTest + @MethodSource("dbs") + void explainAqlQueryAllPlansCustomOption(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().customOption("allPlans", true)); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void parseQuery(ArangoDatabase db) { + final AqlParseEntity parse = db.parseQuery("for i in 1..1 return i"); + assertThat(parse).isNotNull(); + assertThat(parse.getBindVars()).isEmpty(); + assertThat(parse.getCollections()).isEmpty(); + assertThat(parse.getAst()).hasSize(1); + } + + @SlowTest + @ParameterizedTest + @MethodSource("dbs") + void getCurrentlyRunningQueries(ArangoDatabase db) throws InterruptedException { + String query = "return sleep(1)"; + Thread t = new Thread(() -> db.query(query, Void.class)); + t.start(); + Thread.sleep(300); + final Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); + assertThat(currentlyRunningQueries).hasSize(1); + final QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); + assertThat(queryEntity.getId()).isNotNull(); + assertThat(queryEntity.getDatabase()).isEqualTo(db.name()); + assertThat(queryEntity.getUser()).isEqualTo("root"); + assertThat(queryEntity.getQuery()).isEqualTo(query); + 
assertThat(queryEntity.getBindVars()).isEmpty(); + assertThat(queryEntity.getStarted()).isInThePast(); + assertThat(queryEntity.getRunTime()).isPositive(); + if (isAtLeastVersion(3, 11)) { + assertThat(queryEntity.getPeakMemoryUsage()).isNotNull(); + } + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.EXECUTING); + assertThat(queryEntity.getStream()).isFalse(); + t.join(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("dbs") + void killQuery(ArangoDatabase db) throws InterruptedException, ExecutionException { + ExecutorService es = Executors.newSingleThreadExecutor(); + Future future = es.submit(() -> { + try { + db.query("return sleep(5)", Void.class); + fail(); + } catch (ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(410); + assertThat(e.getErrorNum()).isEqualTo(1500); + assertThat(e.getErrorMessage()).contains("query killed"); + } + }); + Thread.sleep(500); + + Collection currentlyRunningQueries = db.getCurrentlyRunningQueries(); + assertThat(currentlyRunningQueries).hasSize(1); + QueryEntity queryEntity = currentlyRunningQueries.iterator().next(); + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.EXECUTING); + db.killQuery(queryEntity.getId()); + + db.getCurrentlyRunningQueries().forEach(q -> + assertThat(q.getState()).isEqualTo(QueryExecutionState.KILLED) + ); + + future.get(); + es.shutdown(); + } + + @SlowTest + @ParameterizedTest + @MethodSource("dbs") + void getAndClearSlowQueries(ArangoDatabase db) { + db.clearSlowQueries(); + + final QueryTrackingPropertiesEntity properties = db.getQueryTrackingProperties(); + final Long slowQueryThreshold = properties.getSlowQueryThreshold(); + properties.setSlowQueryThreshold(1L); + db.setQueryTrackingProperties(properties); + + String query = "return sleep(1.1)"; + db.query(query, Void.class); + final Collection slowQueries = db.getSlowQueries(); + assertThat(slowQueries).hasSize(1); + final QueryEntity queryEntity = slowQueries.iterator().next(); + 
assertThat(queryEntity.getId()).isNotNull(); + assertThat(queryEntity.getDatabase()).isEqualTo(db.name()); + assertThat(queryEntity.getUser()).isEqualTo("root"); + assertThat(queryEntity.getQuery()).isEqualTo(query); + assertThat(queryEntity.getBindVars()).isEmpty(); + assertThat(queryEntity.getStarted()).isInThePast(); + assertThat(queryEntity.getRunTime()).isPositive(); + if (isAtLeastVersion(3, 11)) { + assertThat(queryEntity.getPeakMemoryUsage()).isNotNull(); + } + assertThat(queryEntity.getState()).isEqualTo(QueryExecutionState.FINISHED); + assertThat(queryEntity.getStream()).isFalse(); + + db.clearSlowQueries(); + assertThat(db.getSlowQueries()).isEmpty(); + properties.setSlowQueryThreshold(slowQueryThreshold); + db.setQueryTrackingProperties(properties); + } + + @ParameterizedTest + @MethodSource("dbs") + void createGetDeleteAqlFunction(ArangoDatabase db) { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null); + assertThat(aqlFunctionsInitial).isEmpty(); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit", + "function (celsius) { return celsius * 1.8 + 32; }", null); + + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions).hasSizeGreaterThan(aqlFunctionsInitial.size()); + } finally { + final Integer deleteCount = db.deleteAqlFunction("myfunctions::temperature::celsiustofahrenheit", null); + // compatibility with ArangoDB < 3.4 + if (isAtLeastVersion(3, 4)) { + assertThat(deleteCount).isEqualTo(1); + } else { + assertThat(deleteCount).isNull(); + } + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions).hasSize(aqlFunctionsInitial.size()); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void createGetDeleteAqlFunctionWithNamespace(ArangoDatabase db) { + final Collection aqlFunctionsInitial = db.getAqlFunctions(null); + assertThat(aqlFunctionsInitial).isEmpty(); + try { + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit1", + 
"function (celsius) { return celsius * 1.8 + 32; }", null); + db.createAqlFunction("myfunctions::temperature::celsiustofahrenheit2", + "function (celsius) { return celsius * 1.8 + 32; }", null); + + } finally { + final Integer deleteCount = db + .deleteAqlFunction("myfunctions::temperature", new AqlFunctionDeleteOptions().group(true)); + // compatibility with ArangoDB < 3.4 + if (isAtLeastVersion(3, 4)) { + assertThat(deleteCount).isEqualTo(2); + } else { + assertThat(deleteCount).isNull(); + } + final Collection aqlFunctions = db.getAqlFunctions(null); + assertThat(aqlFunctions).hasSize(aqlFunctionsInitial.size()); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void createGraph(ArangoDatabase db) { + String name = "graph-" + rnd(); + final GraphEntity result = db.createGraph(name, null, null); + assertThat(result.getName()).isEqualTo(name); + } + + @ParameterizedTest + @MethodSource("dbs") + void createGraphSatellite(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 7)); + assumeTrue(isCluster()); + assumeTrue(isEnterprise()); + + String name = "graph-" + rnd(); + final GraphEntity result = db.createGraph(name, null, new GraphCreateOptions().replicationFactor(ReplicationFactor.ofSatellite())); + assertThat(result.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + GraphEntity info = db.graph(name).getInfo(); + assertThat(info.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + GraphEntity graph = db.getGraphs().stream().filter(g -> name.equals(g.getName())).findFirst().get(); + assertThat(graph.getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + } + + @ParameterizedTest + @MethodSource("dbs") + void createGraphReplicationFaktor(ArangoDatabase db) { + assumeTrue(isCluster()); + String name = "graph-" + rnd(); + final String edgeCollection = rndName(); + final String fromCollection = rndName(); + final String toCollection = rndName(); + final Collection edgeDefinitions = + 
Collections.singletonList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db.createGraph(name, edgeDefinitions, new GraphCreateOptions().replicationFactor(2)); + assertThat(result).isNotNull(); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(2); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void createGraphNumberOfShards(ArangoDatabase db) { + assumeTrue(isCluster()); + String name = "graph-" + rnd(); + final String edgeCollection = rndName(); + final String fromCollection = rndName(); + final String toCollection = rndName(); + final Collection edgeDefinitions = + Collections.singletonList(new EdgeDefinition().collection(edgeCollection).from(fromCollection).to(toCollection)); + final GraphEntity result = db + .createGraph(name, edgeDefinitions, new GraphCreateOptions().numberOfShards(2)); + assertThat(result).isNotNull(); + for (final String collection : Arrays.asList(edgeCollection, fromCollection, toCollection)) { + final CollectionPropertiesEntity properties = db.collection(collection).getProperties(); + assertThat(properties.getNumberOfShards()).isEqualTo(2); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void getGraphs(ArangoDatabase db) { + String name = "graph-" + rnd(); + db.createGraph(name, null, null); + final Collection graphs = db.getGraphs(); + assertThat(graphs).hasSizeGreaterThanOrEqualTo(1); + long count = graphs.stream().map(GraphEntity::getName).filter(name::equals).count(); + assertThat(count).isEqualTo(1L); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionString(ArangoDatabase db) { + final TransactionOptions options = new TransactionOptions().params("test"); + final RawJson result = db.transaction("function (params) {return params;}", 
RawJson.class, options); + assertThat(result.get()).isEqualTo("\"test\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionNumber(ArangoDatabase db) { + final TransactionOptions options = new TransactionOptions().params(5); + final Integer result = db.transaction("function (params) {return params;}", Integer.class, options); + assertThat(result).isEqualTo(5); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionJsonNode(ArangoDatabase db) { + final TransactionOptions options = new TransactionOptions().params(JsonNodeFactory.instance.textNode("test")); + final JsonNode result = db.transaction("function (params) {return params;}", JsonNode.class, options); + assertThat(result.isTextual()).isTrue(); + assertThat(result.asText()).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionJsonObject(ArangoDatabase db) { + ObjectNode params = JsonNodeFactory.instance.objectNode().put("foo", "hello").put("bar", "world"); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", RawJson.class, + options); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionJsonArray(ArangoDatabase db) { + ArrayNode params = JsonNodeFactory.instance.arrayNode().add("hello").add("world"); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionMap(ArangoDatabase db) { + final Map params = new MapBuilder().put("foo", "hello").put("bar", "world").get(); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + 
.transaction("function (params) { return params['foo'] + ' ' + params['bar'];}", RawJson.class, + options); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionArray(ArangoDatabase db) { + final String[] params = new String[]{"hello", "world"}; + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionCollection(ArangoDatabase db) { + final Collection params = new ArrayList<>(); + params.add("hello"); + params.add("world"); + final TransactionOptions options = new TransactionOptions().params(params); + final RawJson result = db + .transaction("function (params) { return params[0] + ' ' + params[1];}", RawJson.class, options); + assertThat(result.get()).isEqualTo("\"hello world\""); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionInsertJson(ArangoDatabase db) { + String key = "key-" + rnd(); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"" + key + "\"}") + .writeCollections(CNAME1); + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." + CNAME1 + ".save(JSON.parse(params));" + + "}", Void.class, options); + assertThat(db.collection(CNAME1).getDocument(key, RawJson.class)).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionExclusiveWrite(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String key = "key-" + rnd(); + final TransactionOptions options = new TransactionOptions().params("{\"_key\":\"" + key + "\"}") + .exclusiveCollections(CNAME1); + db.transaction("function (params) { " + + "var db = require('internal').db;" + + "db." 
+ CNAME1 + ".save(JSON.parse(params));" + + "}", Void.class, options); + assertThat(db.collection(CNAME1).getDocument(key, RawJson.class)).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionEmpty(ArangoDatabase db) { + db.transaction("function () {}", Void.class, null); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionAllowImplicit(ArangoDatabase db) { + final String action = "function (params) {" + "var db = require('internal').db;" + + "return {'a':db." + CNAME1 + ".all().toArray()[0], 'b':db." + CNAME2 + ".all().toArray()[0]};" + + "}"; + final TransactionOptions options = new TransactionOptions().readCollections(CNAME1); + db.transaction(action, JsonNode.class, options); + options.allowImplicit(false); + Throwable thrown = catchThrowable(() -> db.transaction(action, JsonNode.class, options)); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(it -> ((ArangoDBException) it).getResponseCode()) + .isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionPojoReturn(ArangoDatabase db) { + final String action = "function() { return {'value':'hello world'}; }"; + final TransactionTestEntity res = db.transaction(action, TransactionTestEntity.class, new TransactionOptions()); + assertThat(res).isNotNull(); + assertThat(res.value).isEqualTo("hello world"); + } + + @ParameterizedTest + @MethodSource("dbs") + void getInfo(ArangoDatabase db) { + final DatabaseEntity info = db.getInfo(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(getTestDb()); + assertThat(info.getPath()).isNotNull(); + assertThat(info.getIsSystem()).isFalse(); + + if (isAtLeastVersion(3, 6) && isCluster()) { + assertThat(info.getSharding()).isNotNull(); + assertThat(info.getWriteConcern()).isNotNull(); + assertThat(info.getReplicationFactor()).isNotNull(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void 
shouldIncludeExceptionMessage(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + + final String exceptionMessage = "My error context"; + final String action = "function (params) {" + "throw '" + exceptionMessage + "';" + "}"; + try { + db.transaction(action, Void.class, null); + fail(); + } catch (final ArangoDBException e) { + assertThat(e.getErrorMessage()).isEqualTo(exceptionMessage); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void reloadRouting(ArangoDatabase db) { + db.reloadRouting(); + } + + public static class TransactionTestEntity { + private String value; + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoDocumentUtilTest.java b/test-functional/src/test/java/com/arangodb/ArangoDocumentUtilTest.java new file mode 100644 index 000000000..8edb602b5 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoDocumentUtilTest.java @@ -0,0 +1,85 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.internal.util.DocumentUtil; +import com.arangodb.util.TestUtils; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoDocumentUtilTest { + + @Test + void validateDocumentKeyValid() { + checkDocumentKey("1test"); + checkDocumentKey("test1"); + checkDocumentKey("test-1"); + checkDocumentKey("test_1"); + checkDocumentKey("_test"); + } + + @Test + void validateDocumentKeyInvalidSlash() { + Throwable thrown = catchThrowable(() -> checkDocumentKey("test/test")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @Test + void validateDocumentKeyEmpty() { + Throwable thrown = catchThrowable(() -> checkDocumentKey("")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + private void checkDocumentKey(final String key) throws ArangoDBException { + DocumentUtil.validateDocumentKey(key); + } + + @Test + void validateDocumentIdValid() { + checkDocumentId(TestUtils.generateRandomName(true, 100) + "/1test"); + checkDocumentId(TestUtils.generateRandomName(true, 100) + "/test1"); + checkDocumentId(TestUtils.generateRandomName(true, 100) + "/test-1"); + checkDocumentId(TestUtils.generateRandomName(true, 100) + "/test_1"); + checkDocumentId(TestUtils.generateRandomName(true, 100) + "/_test"); + } + + @Test + void validateDocumentIdInvalidWithoutSlash() { + Throwable thrown = catchThrowable(() -> checkDocumentId("test")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @Test + void validateDocumentIdEmpty() { + Throwable thrown = catchThrowable(() -> checkDocumentId("")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + private void checkDocumentId(final String id) throws ArangoDBException { + 
DocumentUtil.validateDocumentId(id); + } +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionAsyncTest.java new file mode 100644 index 000000000..16b598936 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionAsyncTest.java @@ -0,0 +1,446 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoEdgeCollectionAsyncTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "EdgeCollectionTest_graph"; + private static final String VERTEX_COLLECTION_NAME = rndName(); + private static final String EDGE_COLLECTION_NAME = rndName(); + + private static Stream asyncArgs() { + return asyncDbsStream() + .map(it -> new Object[]{ + Named.of(it.getName(), it.getPayload().graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_NAME)), + Named.of(it.getName(), it.getPayload().graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME)) + }) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(VERTEX_COLLECTION_NAME); + initEdgeCollections(EDGE_COLLECTION_NAME); + initGraph( + GRAPH_NAME, + Collections.singletonList(new EdgeDefinition() + .collection(EDGE_COLLECTION_NAME) + .from(VERTEX_COLLECTION_NAME) + .to(VERTEX_COLLECTION_NAME) + ), + null + ); + } + + private BaseEdgeDocument createEdgeValue(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity v1 = vertices.insertVertex(new BaseDocument()).get(); + final VertexEntity v2 = vertices.insertVertex(new BaseDocument()).get(); + + final BaseEdgeDocument value = new BaseEdgeDocument(); + 
value.setFrom(v1.getId()); + value.setTo(v2.getId()); + return value; + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void insertEdge(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + assertThat(edge).isNotNull(); + final BaseEdgeDocument document = edges.getEdge(edge.getKey(), BaseEdgeDocument.class).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void insertEdgeUpdateRev(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + assertThat(value.getRevision()).isNull(); + assertThat(edge.getRev()).isNotNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void insertEdgeViolatingUniqueConstraint(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + edges.graph().db().collection(EDGE_COLLECTION_NAME) + .ensurePersistentIndex(Arrays.asList("_from", "_to"), new PersistentIndexOptions().unique(true)).get(); + + BaseEdgeDocument edge = createEdgeValue(vertices); + edges.insertEdge(edge).get(); + + Throwable t = catchThrowable(() -> edges.insertEdge(edge).get()).getCause(); + assertThat(t).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) t; + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void 
getEdge(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + final BaseEdgeDocument document = edges + .getEdge(edge.getKey(), BaseEdgeDocument.class).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void getEdgeIfMatch(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch(edge.getRev()); + final BaseDocument document = edges.getEdge(edge.getKey(), + BaseDocument.class, options).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void getEdgeIfMatchFail(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch("no"); + final BaseEdgeDocument edge2 = edges.getEdge(edge.getKey(), + BaseEdgeDocument.class, options).get(); + assertThat(edge2).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void getEdgeIfNoneMatch(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final 
EdgeEntity edge = edges.insertEdge(value).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch("no"); + final BaseDocument document = edges.getEdge(edge.getKey(), + BaseDocument.class, options).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void getEdgeIfNoneMatchFail(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch(edge.getRev()); + final BaseEdgeDocument edge2 = edges.getEdge(edge.getKey(), + BaseEdgeDocument.class, options).get(); + assertThat(edge2).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void replaceEdge(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + 
assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void replaceEdgeUpdateRev(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void replaceEdgeIfMatch(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch(createResult.getRev()); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc, options).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + 
assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void replaceEdgeIfMatchFail(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.replaceEdge(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdge(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), 
BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdgeUpdateRev(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdgeIfMatch(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch(createResult.getRev()); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + 
assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdgeIfMatchFail(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.updateEdge(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdgeKeepNullTrue(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = 
edges.insertEdge(doc).get(); + doc.updateAttribute("a", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(true); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties().keySet()).hasSize(6); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void updateEdgeKeepNullFalse(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + doc.updateAttribute("a", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(false); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + 
assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void deleteEdge(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + edges.deleteEdge(createResult.getKey()).get(); + final BaseEdgeDocument edge = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(edge).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void deleteEdgeIfMatch(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch(createResult.getRev()); + edges.deleteEdge(createResult.getKey(), options).get(); + final BaseEdgeDocument edge = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class).get(); + assertThat(edge).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("asyncArgs") + void deleteEdgeIfMatchFail(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc).get(); + final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.deleteEdge(createResult.getKey(), options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest(name = "{1}") + 
@MethodSource("asyncArgs") + void edgeKeyWithSpecialChars(ArangoVertexCollectionAsync vertices, ArangoEdgeCollectionAsync edges) throws ExecutionException, InterruptedException { + final BaseEdgeDocument value = createEdgeValue(vertices); + final String key = "_-:.@()+,=;$!*'%" + UUID.randomUUID(); + value.setKey(key); + final EdgeEntity edge = edges.insertEdge(value).get(); + assertThat(edge).isNotNull(); + final BaseEdgeDocument document = edges.getEdge(edge.getKey(), BaseEdgeDocument.class).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(key); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java b/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java new file mode 100644 index 000000000..8be23b67c --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoEdgeCollectionTest.java @@ -0,0 +1,446 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoEdgeCollectionTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "EdgeCollectionTest_graph"; + private static final String VERTEX_COLLECTION_NAME = rndName(); + private static final String EDGE_COLLECTION_NAME = rndName(); + + private static Stream args() { + return dbsStream() + .map(it -> new Object[]{ + Named.of(it.getName(), it.getPayload().graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_NAME)), + Named.of(it.getName(), it.getPayload().graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME)) + }) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(VERTEX_COLLECTION_NAME); + initEdgeCollections(EDGE_COLLECTION_NAME); + initGraph( + GRAPH_NAME, + Collections.singletonList(new EdgeDefinition() + .collection(EDGE_COLLECTION_NAME) + .from(VERTEX_COLLECTION_NAME) + .to(VERTEX_COLLECTION_NAME) + ), + null + ); + } + + private BaseEdgeDocument createEdgeValue(ArangoVertexCollection vertices) { + final VertexEntity v1 = vertices.insertVertex(new BaseDocument()); + final VertexEntity v2 = vertices.insertVertex(new BaseDocument()); + + final BaseEdgeDocument value = new BaseEdgeDocument(); + value.setFrom(v1.getId()); + value.setTo(v2.getId()); + return value; + } + + @ParameterizedTest(name = "{1}") + 
@MethodSource("args") + void insertEdge(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + assertThat(edge).isNotNull(); + final BaseEdgeDocument document = edges.getEdge(edge.getKey(), BaseEdgeDocument.class); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void insertEdgeUpdateRev(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + assertThat(value.getRevision()).isNull(); + assertThat(edge.getRev()).isNotNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void insertEdgeViolatingUniqueConstraint(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + edges.graph().db().collection(EDGE_COLLECTION_NAME) + .ensurePersistentIndex(Arrays.asList("_from", "_to"), new PersistentIndexOptions().unique(true)); + + BaseEdgeDocument edge = createEdgeValue(vertices); + edges.insertEdge(edge); + + try { + edges.insertEdge(edge); + } catch (ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + } + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void getEdge(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + final BaseEdgeDocument document = edges + .getEdge(edge.getKey(), BaseEdgeDocument.class); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + + 
@ParameterizedTest(name = "{1}") + @MethodSource("args") + void getEdgeIfMatch(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch(edge.getRev()); + final BaseDocument document = edges.getEdge(edge.getKey(), + BaseDocument.class, options); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void getEdgeIfMatchFail(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch("no"); + final BaseEdgeDocument edge2 = edges.getEdge(edge.getKey(), + BaseEdgeDocument.class, options); + assertThat(edge2).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void getEdgeIfNoneMatch(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch("no"); + final BaseDocument document = edges.getEdge(edge.getKey(), + BaseDocument.class, options); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(edge.getKey()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void getEdgeIfNoneMatchFail(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final EdgeEntity edge = edges.insertEdge(value); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch(edge.getRev()); + final BaseEdgeDocument edge2 = edges.getEdge(edge.getKey(), + 
BaseEdgeDocument.class, options); + assertThat(edge2).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void replaceEdge(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void replaceEdgeUpdateRev(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void replaceEdgeIfMatch(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + 
doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch(createResult.getRev()); + final EdgeUpdateEntity replaceResult = edges + .replaceEdge(createResult.getKey(), doc, options); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void replaceEdgeIfMatchFail(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final EdgeReplaceOptions options = new EdgeReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.replaceEdge(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdge(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = 
createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdgeUpdateRev(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdgeIfMatch(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final 
EdgeEntity createResult = edges.insertEdge(doc); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch(createResult.getRev()); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdgeIfMatchFail(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.updateEdge(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + 
} + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdgeKeepNullTrue(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.updateAttribute("a", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(true); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties().keySet()).hasSize(6); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void updateEdgeKeepNullFalse(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + doc.addAttribute("a", "test"); + final EdgeEntity createResult = edges.insertEdge(doc); + doc.updateAttribute("a", null); + final EdgeUpdateOptions options = new EdgeUpdateOptions().keepNull(false); + final EdgeUpdateEntity updateResult = edges + .updateEdge(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseEdgeDocument readResult = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + 
assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void deleteEdge(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc); + edges.deleteEdge(createResult.getKey()); + final BaseEdgeDocument edge = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(edge).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void deleteEdgeIfMatch(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc); + final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch(createResult.getRev()); + edges.deleteEdge(createResult.getKey(), options); + final BaseEdgeDocument edge = edges + .getEdge(createResult.getKey(), BaseEdgeDocument.class); + assertThat(edge).isNull(); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void deleteEdgeIfMatchFail(ArangoVertexCollection vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument doc = createEdgeValue(vertices); + final EdgeEntity createResult = edges.insertEdge(doc); + final EdgeDeleteOptions options = new EdgeDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> edges.deleteEdge(createResult.getKey(), options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest(name = "{1}") + @MethodSource("args") + void edgeKeyWithSpecialChars(ArangoVertexCollection 
vertices, ArangoEdgeCollection edges) { + final BaseEdgeDocument value = createEdgeValue(vertices); + final String key = "_-:.@()+,=;$!*'%" + UUID.randomUUID(); + value.setKey(key); + final EdgeEntity edge = edges.insertEdge(value); + assertThat(edge).isNotNull(); + final BaseEdgeDocument document = edges.getEdge(edge.getKey(), BaseEdgeDocument.class); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(key); + assertThat(document.getFrom()).isNotNull(); + assertThat(document.getTo()).isNotNull(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoGraphAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoGraphAsyncTest.java new file mode 100644 index 000000000..ecda8505f --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoGraphAsyncTest.java @@ -0,0 +1,495 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.CollectionPropertiesEntity; +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.entity.ReplicationFactor; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoGraphAsyncTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "ArangoGraphTest_graph"; + + private static final String VERTEX_COL_1 = rndName(); + private static final String VERTEX_COL_2 = rndName(); + private static final String VERTEX_COL_3 = rndName(); + private static final String VERTEX_COL_4 = rndName(); + private static final String VERTEX_COL_5 = rndName(); + + private static final String EDGE_COL_1 = rndName(); + private static final String EDGE_COL_2 = rndName(); + private static final String EDGE_COL_3 = rndName(); + + private static final Integer REPLICATION_FACTOR = 2; + private static final Integer NUMBER_OF_SHARDS = 2; + + private static final EdgeDefinition ed1 = + new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_1).to(VERTEX_COL_5); + private static final EdgeDefinition ed2 = + new EdgeDefinition().collection(EDGE_COL_2).from(VERTEX_COL_2).to(VERTEX_COL_1, VERTEX_COL_3); + + private static Stream asyncGraphs() { + return asyncDbsStream() + .map(mapNamedPayload(db -> 
db.graph(GRAPH_NAME))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + final Collection edgeDefinitions = Arrays.asList(ed1, ed2); + + final GraphCreateOptions options = new GraphCreateOptions() + .replicationFactor(REPLICATION_FACTOR) + .numberOfShards(NUMBER_OF_SHARDS); + + initGraph(GRAPH_NAME, edgeDefinitions, options); + } + + + @ParameterizedTest + @MethodSource("asyncGraphs") + void exists(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + assertThat(graph.exists().get()).isTrue(); + assertThat(graph.db().graph(GRAPH_NAME + "no").exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createWithReplicationAndWriteConcern(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isCluster()); + + final Collection edgeDefinitions = new ArrayList<>(); + final GraphEntity graph = db.createGraph(GRAPH_NAME + "_1", edgeDefinitions, + new GraphCreateOptions().isSmart(true).replicationFactor(2).writeConcern(2)).get(); + assertThat(graph).isNotNull(); + assertThat(graph.getName()).isEqualTo(GRAPH_NAME + "_1"); + assertThat(graph.getWriteConcern()).isEqualTo(2); + assertThat(graph.getReplicationFactor().get()).isEqualTo(2); + db.graph(GRAPH_NAME + "_1").drop().get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getGraphs(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final Collection graphs = db.getGraphs().get(); + assertThat(graphs.stream().anyMatch(it -> it.getName().equals(GRAPH_NAME))).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void getInfo(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final GraphEntity info = graph.getInfo().get(); + assertThat(info).isNotNull(); + assertThat(info.getName()).isEqualTo(GRAPH_NAME); + assertThat(info.getEdgeDefinitions()).hasSize(2); + + assertThat(info.getEdgeDefinitions()) + 
.anySatisfy(e1 -> { + assertThat(e1.getCollection()).isEqualTo(EDGE_COL_1); + assertThat(e1.getFrom()).contains(VERTEX_COL_1); + assertThat(e1.getTo()).contains(VERTEX_COL_5); + }) + .anySatisfy(e2 -> { + assertThat(e2.getCollection()).isEqualTo(EDGE_COL_2); + assertThat(e2.getFrom()).contains(VERTEX_COL_2); + assertThat(e2.getTo()).contains(VERTEX_COL_1, VERTEX_COL_3); + }); + + assertThat(info.getOrphanCollections()).isEmpty(); + + if (isCluster()) { + for (final String collection : new String[]{EDGE_COL_1, EDGE_COL_2, VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_5}) { + final CollectionPropertiesEntity properties = graph.db().collection(collection).getProperties().get(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(REPLICATION_FACTOR); + assertThat(properties.getNumberOfShards()).isEqualTo(NUMBER_OF_SHARDS); + } + } + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void getVertexCollections(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final Collection vertexCollections = graph.getVertexCollections().get(); + assertThat(vertexCollections) + .hasSize(4) + .contains(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, VERTEX_COL_5); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void addVertexCollection(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final GraphEntity g = graph.addVertexCollection(VERTEX_COL_4).get(); + assertThat(g).isNotNull(); + final Collection vertexCollections = graph.getVertexCollections().get(); + assertThat(vertexCollections).contains(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, VERTEX_COL_4, VERTEX_COL_5); + + // revert + graph.vertexCollection(VERTEX_COL_4).remove().get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void addSatelliteVertexCollection(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 9)); + + 
String v1Name = "vertex-" + rnd(); + + ArangoGraphAsync g = db.graph(GRAPH_NAME + rnd()); + g.create(null, new GraphCreateOptions().isSmart(true).smartGraphAttribute("test")).get(); + g.addVertexCollection(v1Name, new VertexCollectionCreateOptions().satellites(v1Name)).get(); + + Collection vertexCollections = g.getVertexCollections().get(); + assertThat(vertexCollections).contains(v1Name); + assertThat(db.collection(v1Name).getProperties().get().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + // revert + g.drop().get(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void getEdgeCollections(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final Collection edgeCollections = graph.getEdgeDefinitions().get(); + assertThat(edgeCollections) + .hasSize(2) + .contains(EDGE_COL_1, EDGE_COL_2); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void addEdgeDefinition(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + EdgeDefinition ed = new EdgeDefinition().collection(EDGE_COL_3).from(VERTEX_COL_1).to(VERTEX_COL_2); + final GraphEntity g = graph.addEdgeDefinition(ed).get(); + assertThat(g).isNotNull(); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(3); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_3)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_3)) { + assertThat(e.getFrom()).contains(VERTEX_COL_1); + assertThat(e.getTo()).contains(VERTEX_COL_2); + } + } + if (isCluster()) { + final CollectionPropertiesEntity properties = graph.db().collection(EDGE_COL_3).getProperties().get(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(REPLICATION_FACTOR); + assertThat(properties.getNumberOfShards()).isEqualTo(NUMBER_OF_SHARDS); + } + + // revert + 
graph.edgeCollection(EDGE_COL_3).remove().get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void addSatelliteEdgeDefinition(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 9)); + + String eName = "edge-" + rnd(); + String v1Name = "vertex-" + rnd(); + String v2Name = "vertex-" + rnd(); + EdgeDefinition ed = new EdgeDefinition().collection(eName).from(v1Name).to(v2Name).satellites(v1Name); + + ArangoGraphAsync g = db.graph(GRAPH_NAME + rnd()); + g.create(null, new GraphCreateOptions().isSmart(true).smartGraphAttribute("test")).get(); + g.addEdgeDefinition(ed).get(); + final GraphEntity ge = g.getInfo().get(); + assertThat(ge).isNotNull(); + final Collection edgeDefinitions = ge.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(1); + EdgeDefinition e = edgeDefinitions.iterator().next(); + assertThat(e.getCollection()).isEqualTo(eName); + assertThat(e.getFrom()).contains(v1Name); + assertThat(e.getTo()).contains(v2Name); + + assertThat(db.collection(v1Name).getProperties().get().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + // revert + g.drop().get(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void replaceEdgeDefinition(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final GraphEntity g = graph + .replaceEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_3).to(VERTEX_COL_4)).get(); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(2); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + assertThat(e.getFrom()).contains(VERTEX_COL_3); + 
assertThat(e.getTo()).contains(VERTEX_COL_4); + } + } + assertThat(graph.db().collection(VERTEX_COL_1).exists().get()).isTrue(); + + // revert + graph.edgeCollection(EDGE_COL_1).remove().get(); + graph.vertexCollection(VERTEX_COL_4).remove().get(); + graph.addEdgeDefinition(ed1).get(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + @Disabled + // FIXME: with dropCollections=true the vertex collections remain in the graph as orphan and not dropped + void replaceEdgeDefinitionDropCollections(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + final GraphEntity g = graph + .replaceEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_3).to(VERTEX_COL_4), + new ReplaceEdgeDefinitionOptions().waitForSync(true).dropCollections(true)).get(); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(2); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + assertThat(e.getFrom()).contains(VERTEX_COL_3); + assertThat(e.getTo()).contains(VERTEX_COL_4); + } + } + assertThat(graph.db().collection(VERTEX_COL_5).exists().get()).isFalse(); + + // revert + graph.edgeCollection(EDGE_COL_1).remove().get(); + graph.vertexCollection(VERTEX_COL_4).remove().get(); + graph.addEdgeDefinition(ed1).get(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void removeEdgeDefinition(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + graph.edgeCollection(EDGE_COL_1).remove().get(); + Collection edgeDefinitions = graph.getEdgeDefinitions().get(); + assertThat(edgeDefinitions).hasSize(1); + assertThat(edgeDefinitions.iterator().next()).isEqualTo(EDGE_COL_2); + assertThat(graph.db().collection(EDGE_COL_1).exists().get()).isTrue(); + + //revert + 
graph.addEdgeDefinition(ed1).get(); + } + + @ParameterizedTest + @MethodSource("asyncGraphs") + void removeEdgeDefinitionDropCollections(ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + graph.edgeCollection(EDGE_COL_1).remove(new EdgeCollectionRemoveOptions() + .dropCollections(true) + .waitForSync(true)).get(); + Collection edgeDefinitions = graph.getEdgeDefinitions().get(); + assertThat(edgeDefinitions).hasSize(1); + assertThat(edgeDefinitions.iterator().next()).isEqualTo(EDGE_COL_2); + assertThat(graph.db().collection(EDGE_COL_1).exists().get()).isFalse(); + + //revert + graph.addEdgeDefinition(ed1).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void smartGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("smartGraph-edge-" + rnd()).from("smartGraph-vertex-" + rnd()).to("smartGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, + new GraphCreateOptions().isSmart(true).smartGraphAttribute("test").numberOfShards(2)).get(); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void hybridSmartGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 9))); + + final Collection edgeDefinitions = new ArrayList<>(); + String eName = "hybridSmartGraph-edge-" + rnd(); + String v1Name = "hybridSmartGraph-vertex-" + rnd(); + String v2Name = "hybridSmartGraph-vertex-" + rnd(); + edgeDefinitions.add(new 
EdgeDefinition().collection(eName).from(v1Name).to(v2Name)); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .satellites(eName, v1Name) + .isSmart(true).smartGraphAttribute("test").replicationFactor(2).numberOfShards(2)).get(); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + + assertThat(db.collection(eName).getProperties().get().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(db.collection(v1Name).getProperties().get().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(db.collection(v2Name).getProperties().get().getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void disjointSmartGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 7))); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("smartGraph-edge-" + rnd()).from("smartGraph-vertex-" + rnd()).to("smartGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .isSmart(true).isDisjoint(true).smartGraphAttribute("test").numberOfShards(2)).get(); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getIsDisjoint()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void hybridDisjointSmartGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || 
isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 9))); + + final Collection edgeDefinitions = new ArrayList<>(); + String eName = "hybridDisjointSmartGraph-edge-" + rnd(); + String v1Name = "hybridDisjointSmartGraph-vertex-" + rnd(); + String v2Name = "hybridDisjointSmartGraph-vertex-" + rnd(); + edgeDefinitions.add(new EdgeDefinition().collection(eName).from(v1Name).to(v2Name)); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .satellites(v1Name) + .isSmart(true).isDisjoint(true).smartGraphAttribute("test").replicationFactor(2).numberOfShards(2)).get(); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getIsDisjoint()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + + assertThat(db.collection(v1Name).getProperties().get().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(db.collection(v2Name).getProperties().get().getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void enterpriseGraph(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("enterpriseGraph-edge-" + rnd()).from("enterpriseGraph-vertex-" + rnd()).to("enterpriseGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions().isSmart(true).numberOfShards(2)).get(); + + assertThat(g).isNotNull(); + assertThat(g.getSmartGraphAttribute()).isNull(); + assertThat(g.getNumberOfShards()).isEqualTo(2); + if (isAtLeastVersion(3, 10)) { + assertThat(g.getIsSmart()).isTrue(); + } else { + assertThat(g.getIsSmart()).isFalse(); + } + 
} + + @ParameterizedTest + @MethodSource("asyncDbs") + void drop(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String edgeCollection = "edge_" + rnd(); + final String vertexCollection = "vertex_" + rnd(); + final String graphId = GRAPH_NAME + rnd(); + final GraphEntity result = db.graph(graphId).create(Collections + .singleton(new EdgeDefinition().collection(edgeCollection).from(vertexCollection).to(vertexCollection))).get(); + assertThat(result).isNotNull(); + db.graph(graphId).drop(); + assertThat(db.collection(edgeCollection).exists().get()).isTrue(); + assertThat(db.collection(vertexCollection).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void dropPlusDropCollections(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + final String edgeCollection = "edge_dropC" + rnd(); + final String vertexCollection = "vertex_dropC" + rnd(); + final String graphId = GRAPH_NAME + "_dropC" + rnd(); + final GraphEntity result = db.graph(graphId).create(Collections + .singleton(new EdgeDefinition().collection(edgeCollection).from(vertexCollection).to(vertexCollection))).get(); + assertThat(result).isNotNull(); + db.graph(graphId).drop(true).get(); + assertThat(db.collection(edgeCollection).exists().get()).isFalse(); + assertThat(db.collection(vertexCollection).exists().get()).isFalse(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoGraphTest.java b/test-functional/src/test/java/com/arangodb/ArangoGraphTest.java new file mode 100644 index 000000000..730db4e10 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoGraphTest.java @@ -0,0 +1,491 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.CollectionPropertiesEntity; +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.GraphEntity; +import com.arangodb.entity.ReplicationFactor; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoGraphTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "ArangoGraphTest_graph"; + + private static final String VERTEX_COL_1 = rndName(); + private static final String VERTEX_COL_2 = rndName(); + private static final String VERTEX_COL_3 = rndName(); + private static final String VERTEX_COL_4 = rndName(); + private static final String VERTEX_COL_5 = rndName(); + + private static final String EDGE_COL_1 = rndName(); + private static final String EDGE_COL_2 = rndName(); + private static final String EDGE_COL_3 = rndName(); + + private static final Integer REPLICATION_FACTOR = 2; + private static final Integer NUMBER_OF_SHARDS = 2; + + private static final EdgeDefinition ed1 = + new 
EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_1).to(VERTEX_COL_5); + private static final EdgeDefinition ed2 = + new EdgeDefinition().collection(EDGE_COL_2).from(VERTEX_COL_2).to(VERTEX_COL_1, VERTEX_COL_3); + + private static Stream graphs() { + return dbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + final Collection edgeDefinitions = Arrays.asList(ed1, ed2); + + final GraphCreateOptions options = new GraphCreateOptions() + .replicationFactor(REPLICATION_FACTOR) + .numberOfShards(NUMBER_OF_SHARDS); + + initGraph(GRAPH_NAME, edgeDefinitions, options); + } + + + @ParameterizedTest + @MethodSource("graphs") + void exists(ArangoGraph graph) { + assertThat(graph.exists()).isTrue(); + assertThat(graph.db().graph(GRAPH_NAME + "no").exists()).isFalse(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createWithReplicationAndWriteConcern(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isCluster()); + + final Collection edgeDefinitions = new ArrayList<>(); + final GraphEntity graph = db.createGraph(GRAPH_NAME + "_1", edgeDefinitions, + new GraphCreateOptions().isSmart(true).replicationFactor(2).writeConcern(2)); + assertThat(graph).isNotNull(); + assertThat(graph.getName()).isEqualTo(GRAPH_NAME + "_1"); + assertThat(graph.getWriteConcern()).isEqualTo(2); + assertThat(graph.getReplicationFactor().get()).isEqualTo(2); + db.graph(GRAPH_NAME + "_1").drop(); + } + + @ParameterizedTest + @MethodSource("dbs") + void getGraphs(ArangoDatabase db) { + final Collection graphs = db.getGraphs(); + assertThat(graphs.stream().anyMatch(it -> it.getName().equals(GRAPH_NAME))).isTrue(); + } + + @ParameterizedTest + @MethodSource("graphs") + void getInfo(ArangoGraph graph) { + final GraphEntity info = graph.getInfo(); + assertThat(info).isNotNull(); + assertThat(info.getName()).isEqualTo(GRAPH_NAME); + assertThat(info.getEdgeDefinitions()).hasSize(2); + + 
assertThat(info.getEdgeDefinitions()) + .anySatisfy(e1 -> { + assertThat(e1.getCollection()).isEqualTo(EDGE_COL_1); + assertThat(e1.getFrom()).contains(VERTEX_COL_1); + assertThat(e1.getTo()).contains(VERTEX_COL_5); + }) + .anySatisfy(e2 -> { + assertThat(e2.getCollection()).isEqualTo(EDGE_COL_2); + assertThat(e2.getFrom()).contains(VERTEX_COL_2); + assertThat(e2.getTo()).contains(VERTEX_COL_1, VERTEX_COL_3); + }); + + assertThat(info.getOrphanCollections()).isEmpty(); + + if (isCluster()) { + for (final String collection : new String[]{EDGE_COL_1, EDGE_COL_2, VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_5}) { + final CollectionPropertiesEntity properties = graph.db().collection(collection).getProperties(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(REPLICATION_FACTOR); + assertThat(properties.getNumberOfShards()).isEqualTo(NUMBER_OF_SHARDS); + } + } + } + + @ParameterizedTest + @MethodSource("graphs") + void getVertexCollections(ArangoGraph graph) { + final Collection vertexCollections = graph.getVertexCollections(); + assertThat(vertexCollections) + .hasSize(4) + .contains(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, VERTEX_COL_5); + } + + @ParameterizedTest + @MethodSource("graphs") + void addVertexCollection(ArangoGraph graph) { + final GraphEntity g = graph.addVertexCollection(VERTEX_COL_4); + assertThat(g).isNotNull(); + final Collection vertexCollections = graph.getVertexCollections(); + assertThat(vertexCollections).contains(VERTEX_COL_1, VERTEX_COL_2, VERTEX_COL_3, VERTEX_COL_4, VERTEX_COL_5); + + // revert + graph.vertexCollection(VERTEX_COL_4).remove(); + } + + @ParameterizedTest + @MethodSource("dbs") + void addSatelliteVertexCollection(ArangoDatabase db) { + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 9)); + + String v1Name = "vertex-" + rnd(); + + ArangoGraph g = db.graph(GRAPH_NAME + rnd()); + g.create(null, new 
GraphCreateOptions().isSmart(true).smartGraphAttribute("test")); + g.addVertexCollection(v1Name, new VertexCollectionCreateOptions().satellites(v1Name)); + + Collection vertexCollections = g.getVertexCollections(); + assertThat(vertexCollections).contains(v1Name); + assertThat(db.collection(v1Name).getProperties().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + // revert + g.drop(); + } + + @ParameterizedTest + @MethodSource("graphs") + void getEdgeCollections(ArangoGraph graph) { + final Collection edgeCollections = graph.getEdgeDefinitions(); + assertThat(edgeCollections) + .hasSize(2) + .contains(EDGE_COL_1, EDGE_COL_2); + } + + @ParameterizedTest + @MethodSource("graphs") + void addEdgeDefinition(ArangoGraph graph) { + EdgeDefinition ed = new EdgeDefinition().collection(EDGE_COL_3).from(VERTEX_COL_1).to(VERTEX_COL_2); + final GraphEntity g = graph.addEdgeDefinition(ed); + assertThat(g).isNotNull(); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(3); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_3)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_3)) { + assertThat(e.getFrom()).contains(VERTEX_COL_1); + assertThat(e.getTo()).contains(VERTEX_COL_2); + } + } + if (isCluster()) { + final CollectionPropertiesEntity properties = graph.db().collection(EDGE_COL_3).getProperties(); + assertThat(properties.getReplicationFactor().get()).isEqualTo(REPLICATION_FACTOR); + assertThat(properties.getNumberOfShards()).isEqualTo(NUMBER_OF_SHARDS); + } + + // revert + graph.edgeCollection(EDGE_COL_3).remove(); + } + + @ParameterizedTest + @MethodSource("dbs") + void addSatelliteEdgeDefinition(ArangoDatabase db) { + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 9)); + + String eName 
= "edge-" + rnd(); + String v1Name = "vertex-" + rnd(); + String v2Name = "vertex-" + rnd(); + EdgeDefinition ed = new EdgeDefinition().collection(eName).from(v1Name).to(v2Name).satellites(v1Name); + + ArangoGraph g = db.graph(GRAPH_NAME + rnd()); + g.create(null, new GraphCreateOptions().isSmart(true).smartGraphAttribute("test")); + g.addEdgeDefinition(ed); + final GraphEntity ge = g.getInfo(); + assertThat(ge).isNotNull(); + final Collection edgeDefinitions = ge.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(1); + EdgeDefinition e = edgeDefinitions.iterator().next(); + assertThat(e.getCollection()).isEqualTo(eName); + assertThat(e.getFrom()).contains(v1Name); + assertThat(e.getTo()).contains(v2Name); + + assertThat(db.collection(v1Name).getProperties().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + + // revert + g.drop(); + } + + @ParameterizedTest + @MethodSource("graphs") + void replaceEdgeDefinition(ArangoGraph graph) { + final GraphEntity g = graph + .replaceEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_3).to(VERTEX_COL_4)); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(2); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + assertThat(e.getFrom()).contains(VERTEX_COL_3); + assertThat(e.getTo()).contains(VERTEX_COL_4); + } + } + assertThat(graph.db().collection(VERTEX_COL_1).exists()).isTrue(); + + // revert + graph.edgeCollection(EDGE_COL_1).remove(); + graph.vertexCollection(VERTEX_COL_4).remove(); + graph.addEdgeDefinition(ed1); + } + + @ParameterizedTest + @MethodSource("graphs") + @Disabled + // FIXME: with dropCollections=true the vertex collections remain in the graph as orphan and not dropped + void 
replaceEdgeDefinitionDropCollections(ArangoGraph graph) { + final GraphEntity g = graph + .replaceEdgeDefinition(new EdgeDefinition().collection(EDGE_COL_1).from(VERTEX_COL_3).to(VERTEX_COL_4), + new ReplaceEdgeDefinitionOptions().waitForSync(true).dropCollections(true)); + final Collection edgeDefinitions = g.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(2); + int count = 0; + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + count++; + } + } + assertThat(count).isEqualTo(1); + for (final EdgeDefinition e : edgeDefinitions) { + if (e.getCollection().equals(EDGE_COL_1)) { + assertThat(e.getFrom()).contains(VERTEX_COL_3); + assertThat(e.getTo()).contains(VERTEX_COL_4); + } + } + assertThat(graph.db().collection(VERTEX_COL_5).exists()).isFalse(); + + // revert + graph.edgeCollection(EDGE_COL_1).remove(); + graph.vertexCollection(VERTEX_COL_4).remove(); + graph.addEdgeDefinition(ed1); + } + + @ParameterizedTest + @MethodSource("graphs") + void removeEdgeDefinition(ArangoGraph graph) { + graph.edgeCollection(EDGE_COL_1).remove(); + Collection edgeDefinitions = graph.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(1); + assertThat(edgeDefinitions.iterator().next()).isEqualTo(EDGE_COL_2); + assertThat(graph.db().collection(EDGE_COL_1).exists()).isTrue(); + + //revert + graph.addEdgeDefinition(ed1); + } + + @ParameterizedTest + @MethodSource("graphs") + void removeEdgeDefinitionDropCollections(ArangoGraph graph) { + graph.edgeCollection(EDGE_COL_1).remove(new EdgeCollectionRemoveOptions() + .dropCollections(true) + .waitForSync(true)); + Collection edgeDefinitions = graph.getEdgeDefinitions(); + assertThat(edgeDefinitions).hasSize(1); + assertThat(edgeDefinitions.iterator().next()).isEqualTo(EDGE_COL_2); + assertThat(graph.db().collection(EDGE_COL_1).exists()).isFalse(); + + //revert + graph.addEdgeDefinition(ed1); + } + + @ParameterizedTest + @MethodSource("dbs") + void smartGraph(ArangoDatabase db) 
{ + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("smartGraph-edge-" + rnd()).from("smartGraph-vertex-" + rnd()).to("smartGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, + new GraphCreateOptions().isSmart(true).smartGraphAttribute("test").numberOfShards(2)); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void hybridSmartGraph(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 9))); + + final Collection edgeDefinitions = new ArrayList<>(); + String eName = "hybridSmartGraph-edge-" + rnd(); + String v1Name = "hybridSmartGraph-vertex-" + rnd(); + String v2Name = "hybridSmartGraph-vertex-" + rnd(); + edgeDefinitions.add(new EdgeDefinition().collection(eName).from(v1Name).to(v2Name)); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .satellites(eName, v1Name) + .isSmart(true).smartGraphAttribute("test").replicationFactor(2).numberOfShards(2)); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + + assertThat(db.collection(eName).getProperties().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(db.collection(v1Name).getProperties().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + assertThat(db.collection(v2Name).getProperties().getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + 
@MethodSource("dbs") + void disjointSmartGraph(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 7))); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("smartGraph-edge-" + rnd()).from("smartGraph-vertex-" + rnd()).to("smartGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .isSmart(true).isDisjoint(true).smartGraphAttribute("test").numberOfShards(2)); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getIsDisjoint()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void hybridDisjointSmartGraph(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + assumeTrue((isAtLeastVersion(3, 9))); + + final Collection edgeDefinitions = new ArrayList<>(); + String eName = "hybridDisjointSmartGraph-edge-" + rnd(); + String v1Name = "hybridDisjointSmartGraph-vertex-" + rnd(); + String v2Name = "hybridDisjointSmartGraph-vertex-" + rnd(); + edgeDefinitions.add(new EdgeDefinition().collection(eName).from(v1Name).to(v2Name)); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions() + .satellites(v1Name) + .isSmart(true).isDisjoint(true).smartGraphAttribute("test").replicationFactor(2).numberOfShards(2)); + + assertThat(g).isNotNull(); + assertThat(g.getIsSmart()).isTrue(); + assertThat(g.getIsDisjoint()).isTrue(); + assertThat(g.getSmartGraphAttribute()).isEqualTo("test"); + assertThat(g.getNumberOfShards()).isEqualTo(2); + + assertThat(db.collection(v1Name).getProperties().getReplicationFactor()).isEqualTo(ReplicationFactor.ofSatellite()); + 
assertThat(db.collection(v2Name).getProperties().getReplicationFactor().get()).isEqualTo(2); + } + + @ParameterizedTest + @MethodSource("dbs") + void enterpriseGraph(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isCluster() || isAtLeastVersion(3, 10)); + + final Collection edgeDefinitions = new ArrayList<>(); + edgeDefinitions.add(new EdgeDefinition().collection("enterpriseGraph-edge-" + rnd()).from("enterpriseGraph-vertex-" + rnd()).to("enterpriseGraph-vertex-" + rnd())); + + String graphId = GRAPH_NAME + rnd(); + final GraphEntity g = db.createGraph(graphId, edgeDefinitions, new GraphCreateOptions().isSmart(true).numberOfShards(2)); + + assertThat(g).isNotNull(); + assertThat(g.getSmartGraphAttribute()).isNull(); + assertThat(g.getNumberOfShards()).isEqualTo(2); + if (isAtLeastVersion(3, 10)) { + assertThat(g.getIsSmart()).isTrue(); + } else { + assertThat(g.getIsSmart()).isFalse(); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void drop(ArangoDatabase db) { + final String edgeCollection = "edge_" + rnd(); + final String vertexCollection = "vertex_" + rnd(); + final String graphId = GRAPH_NAME + rnd(); + final GraphEntity result = db.graph(graphId).create(Collections + .singleton(new EdgeDefinition().collection(edgeCollection).from(vertexCollection).to(vertexCollection))); + assertThat(result).isNotNull(); + db.graph(graphId).drop(); + assertThat(db.collection(edgeCollection).exists()).isTrue(); + assertThat(db.collection(vertexCollection).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void dropPlusDropCollections(ArangoDatabase db) { + final String edgeCollection = "edge_dropC" + rnd(); + final String vertexCollection = "vertex_dropC" + rnd(); + final String graphId = GRAPH_NAME + "_dropC" + rnd(); + final GraphEntity result = db.graph(graphId).create(Collections + .singleton(new EdgeDefinition().collection(edgeCollection).from(vertexCollection).to(vertexCollection))); + assertThat(result).isNotNull(); + 
db.graph(graphId).drop(true); + assertThat(db.collection(edgeCollection).exists()).isFalse(); + assertThat(db.collection(vertexCollection).exists()).isFalse(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java new file mode 100644 index 000000000..400c497ca --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java @@ -0,0 +1,1112 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.InvertedIndexField; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.ViewType; +import com.arangodb.entity.arangosearch.*; +import com.arangodb.entity.arangosearch.analyzer.*; +import com.arangodb.model.InvertedIndexOptions; +import com.arangodb.model.arangosearch.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.DisabledIfSystemProperty; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoSearchAsyncTest extends BaseJunit5 { + + private static final String COLL_1 = "ArangoSearchTest_view_replace_prop"; + private static final String COLL_2 = "ArangoSearchTest_view_update_prop"; + + @BeforeAll + static void init() { + initCollections(COLL_1, COLL_2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void exists(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()).get(); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createAndExistsSearchAlias(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + db.createSearchAlias(viewName, new SearchAliasCreateOptions()).get(); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") 
+ void getInfo(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()).get(); + final ViewEntity info = db.arangoSearch(viewName).getInfo().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void drop(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()).get(); + final ArangoSearchAsync view = db.arangoSearch(viewName); + view.drop().get(); + assertThat(view.exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void rename(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final String name = viewName + "_new"; + db.createArangoSearch(name, new ArangoSearchCreateOptions()).get(); + db.arangoSearch(name).rename(viewName).get(); + assertThat(db.arangoSearch(name).exists().get()).isFalse(); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createArangoSearchView(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ViewEntity info = db.arangoSearch(viewName).create().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + } + + 
@ParameterizedTest + @MethodSource("asyncDbs") + void createSearchAliasView(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + final ViewEntity info = db.searchAlias(viewName).create().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(db.searchAlias(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createArangoSearchViewWithOptions(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + final ViewEntity info = db.arangoSearch(viewName).create(options).get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createArangoSearchViewWithPrimarySort(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + + final PrimarySort primarySort = PrimarySort.on("myFieldName"); + primarySort.ascending(true); + options.primarySort(primarySort); + options.primarySortCompression(ArangoSearchCompression.none); + options.consolidationIntervalMsec(666666L); + StoredValue storedValue = new StoredValue(Arrays.asList("a", "b"), ArangoSearchCompression.none); + options.storedValues(storedValue); + + final ArangoSearchAsync view = db.arangoSearch(viewName); + final ViewEntity info = view.create(options).get(); + 
assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + + if (isAtLeastVersion(3, 7)) { + final ArangoSearchPropertiesEntity properties = view.getProperties().get(); + assertThat(properties.getPrimarySortCompression()).isEqualTo(ArangoSearchCompression.none); + Collection retrievedStoredValues = properties.getStoredValues(); + assertThat(retrievedStoredValues).isNotNull(); + assertThat(retrievedStoredValues).hasSize(1); + StoredValue retrievedStoredValue = retrievedStoredValues.iterator().next(); + assertThat(retrievedStoredValue).isNotNull(); + assertThat(retrievedStoredValue.getFields()).isEqualTo(storedValue.getFields()); + assertThat(retrievedStoredValue.getCompression()).isEqualTo(storedValue.getCompression()); + } + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createArangoSearchViewWithCommitIntervalMsec(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + options.commitIntervalMsec(666666L); + + final ViewEntity info = db.arangoSearch(viewName).create(options).get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists().get()).isTrue(); + + // check commit interval msec property + final ArangoSearchAsync view = db.arangoSearch(viewName); + final ArangoSearchPropertiesEntity properties = view.getProperties().get(); + assertThat(properties.getCommitIntervalMsec()).isEqualTo(666666L); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createSearchAliasViewWithOptions(ArangoDatabaseAsync db) throws 
ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions(); + final ViewEntity info = db.searchAlias(viewName).create(options).get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(db.searchAlias(viewName).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createSearchAliasViewWithIndexesAndGetProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollectionAsync col = db.collection(COLL_1); + String idxName1 = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName1) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + + String idxName2 = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes( + new SearchAliasIndex(COLL_1, idxName1, SearchAliasIndex.OperationType.add), + new SearchAliasIndex(COLL_1, idxName2, SearchAliasIndex.OperationType.add), + new SearchAliasIndex(COLL_1, idxName2, SearchAliasIndex.OperationType.del) + ); + final ViewEntity info = db.searchAlias(viewName).create(options).get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + 
assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .anyMatch(i -> i.getCollection().equals(COLL_1) && i.getIndex().equals(idxName1)); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getArangoSearchViewProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearchAsync view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()).get(); + final ArangoSearchPropertiesEntity properties = view.getProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(properties.getConsolidationIntervalMsec()).isNotNull(); + assertThat(properties.getCleanupIntervalStep()).isNotNull(); + final ConsolidationPolicy consolidate = properties.getConsolidationPolicy(); + assertThat(consolidate).isNotNull(); + final Collection links = properties.getLinks(); + assertThat(links).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void updateArangoSearchViewProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearchAsync view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()).get(); + final ArangoSearchPropertiesOptions options = new ArangoSearchPropertiesOptions(); + options.cleanupIntervalStep(15L); + options.consolidationIntervalMsec(65000L); + options.consolidationPolicy(ConsolidationPolicy.of(ConsolidationType.BYTES_ACCUM).threshold(1.)); + options.link(CollectionLink.on(COLL_2) + .fields(FieldLink.on("value").analyzers("identity").trackListPositions(true).includeAllFields(true) + .storeValues(StoreValuesType.ID))); + final 
ArangoSearchPropertiesEntity properties = view.updateProperties(options).get(); + assertThat(properties).isNotNull(); + assertThat(properties.getCleanupIntervalStep()).isEqualTo(15L); + assertThat(properties.getConsolidationIntervalMsec()).isEqualTo(65000L); + final ConsolidationPolicy consolidate = properties.getConsolidationPolicy(); + assertThat(consolidate).isNotNull(); + assertThat(consolidate.getType()).isEqualTo(ConsolidationType.BYTES_ACCUM); + assertThat(consolidate.getThreshold()).isEqualTo(1.); + assertThat(properties.getLinks()).hasSize(1); + final CollectionLink link = properties.getLinks().iterator().next(); + assertThat(link.getName()).isEqualTo(COLL_2); + assertThat(link.getFields()).hasSize(1); + final FieldLink next = link.getFields().iterator().next(); + assertThat(next.getName()).isEqualTo("value"); + assertThat(next.getIncludeAllFields()).isTrue(); + assertThat(next.getTrackListPositions()).isTrue(); + assertThat(next.getStoreValues()).isEqualTo(StoreValuesType.ID); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void updateSearchAliasViewWithIndexesAndGetProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollectionAsync col = db.collection(COLL_1); + String idxName = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + ArangoCollectionAsync col2 = db.collection(COLL_2); + String idxName2 = rndName(); + col2.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes(new SearchAliasIndex(COLL_1, idxName)); + final ViewEntity info = db.searchAlias(viewName).create(options).get(); + db.searchAlias(viewName).updateProperties(new SearchAliasPropertiesOptions() + .indexes(new 
SearchAliasIndex(COLL_2, idxName2))).get(); + + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .hasSize(2) + .anyMatch(i -> i.getCollection().equals(COLL_1) && i.getIndex().equals(idxName)) + .anyMatch(i -> i.getCollection().equals(COLL_2) && i.getIndex().equals(idxName2)); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void replaceArangoSearchViewProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearchAsync view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()).get(); + final ArangoSearchPropertiesOptions options = new ArangoSearchPropertiesOptions(); + options.link(CollectionLink.on(COLL_1) + .fields(FieldLink.on("value").analyzers("identity"))); + final ArangoSearchPropertiesEntity properties = view.replaceProperties(options).get(); + assertThat(properties).isNotNull(); + assertThat(properties.getLinks()).hasSize(1); + final CollectionLink link = properties.getLinks().iterator().next(); + assertThat(link.getName()).isEqualTo(COLL_1); + assertThat(link.getFields()).hasSize(1); + assertThat(link.getFields().iterator().next().getName()).isEqualTo("value"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void replaceSearchAliasViewWithIndexesAndGetProperties(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollectionAsync col = 
db.collection(COLL_1); + String idxName = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + ArangoCollectionAsync col2 = db.collection(COLL_2); + String idxName2 = rndName(); + col2.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))).get(); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes(new SearchAliasIndex(COLL_1, idxName)); + final ViewEntity info = db.searchAlias(viewName).create(options).get(); + db.searchAlias(viewName).replaceProperties(new SearchAliasPropertiesOptions() + .indexes(new SearchAliasIndex(COLL_2, idxName2))).get(); + + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .hasSize(1) + .anyMatch(i -> i.getCollection().equals(COLL_2) && i.getIndex().equals(idxName2)); + } + + private void createGetAndDeleteTypedAnalyzer(ArangoDatabaseAsync db, SearchAnalyzer analyzer) throws ExecutionException, InterruptedException { + + String fullyQualifiedName = db.name() + "::" + analyzer.getName(); + analyzer.setName(fullyQualifiedName); + + // createAnalyzer + SearchAnalyzer createdAnalyzer = db.createSearchAnalyzer(analyzer).get(); + assertThat(createdAnalyzer).isEqualTo(analyzer); + + // getAnalyzer + SearchAnalyzer gotAnalyzer = db.getSearchAnalyzer(analyzer.getName()).get(); + assertThat(gotAnalyzer).isEqualTo(analyzer); + + 
// getAnalyzers + SearchAnalyzer foundAnalyzer = + db.getSearchAnalyzers().get().stream().filter(it -> it.getName().equals(fullyQualifiedName)) + .findFirst().get(); + assertThat(foundAnalyzer).isEqualTo(analyzer); + + // deleteAnalyzer + AnalyzerDeleteOptions deleteOptions = new AnalyzerDeleteOptions(); + deleteOptions.setForce(true); + + db.deleteSearchAnalyzer(analyzer.getName(), deleteOptions).get(); + + Throwable thrown = catchThrowable(() -> db.getSearchAnalyzer(analyzer.getName()).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1202); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void identityAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + IdentityAnalyzer analyzer = new IdentityAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void delimiterAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + DelimiterAnalyzerProperties properties = new DelimiterAnalyzerProperties(); + properties.setDelimiter("-"); + + DelimiterAnalyzer analyzer = new DelimiterAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + 
createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void multiDelimiterAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + MultiDelimiterAnalyzerProperties properties = new MultiDelimiterAnalyzerProperties(); + properties.setDelimiters("-", ",", "..."); + + MultiDelimiterAnalyzer analyzer = new MultiDelimiterAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void stemAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + StemAnalyzerProperties properties = new StemAnalyzerProperties(); + properties.setLocale("ru"); + + StemAnalyzer options = new StemAnalyzer(); + options.setFeatures(features); + options.setName(name); + options.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, options); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void normAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NormAnalyzerProperties properties = new NormAnalyzerProperties(); + properties.setLocale("ru"); + 
properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + + NormAnalyzer options = new NormAnalyzer(); + options.setFeatures(features); + options.setName(name); + options.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, options); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void ngramAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NGramAnalyzerProperties properties = new NGramAnalyzerProperties(); + properties.setMax(6L); + properties.setMin(3L); + properties.setPreserveOriginal(true); + + NGramAnalyzer analyzer = new NGramAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setType(AnalyzerType.ngram); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void enhancedNgramAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 6)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NGramAnalyzerProperties properties = new NGramAnalyzerProperties(); + properties.setMax(6L); + properties.setMin(3L); + properties.setPreserveOriginal(true); + properties.setStartMarker("^"); + properties.setEndMarker("^"); + properties.setStreamType(StreamType.utf8); + + NGramAnalyzer analyzer = new NGramAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + 
@MethodSource("asyncDbs") + void textAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + TextAnalyzerProperties properties = new TextAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + properties.setStemming(true); + + TextAnalyzer analyzer = new TextAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setType(AnalyzerType.text); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void enhancedTextAnalyzerTyped(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 6)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + EdgeNgram edgeNgram = new EdgeNgram(); + edgeNgram.setMin(2L); + edgeNgram.setMax(100000L); + edgeNgram.setPreserveOriginal(true); + + TextAnalyzerProperties properties = new TextAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + properties.setStemming(true); + properties.setEdgeNgram(edgeNgram); + + TextAnalyzer analyzer = new TextAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void arangoSearchOptions(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + 
assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + FieldLink field = FieldLink.on("f1") + .inBackground(true) + .cache(false); + if (isEnterprise()) { + field.nested(FieldLink.on("f2")); + } + CollectionLink link = CollectionLink.on(COLL_1) + .analyzers("identity") + .fields(field) + .includeAllFields(true) + .storeValues(StoreValuesType.ID) + .trackListPositions(false) + .inBackground(true) + .cache(true); + + if (isEnterprise()) { + link.nested(FieldLink.on("f3")); + } + ArangoSearchCreateOptions options = new ArangoSearchCreateOptions() + .link(link) + .primarySortCache(true) + .primaryKeyCache(true); + StoredValue storedValue = new StoredValue(Arrays.asList("a", "b"), ArangoSearchCompression.none, true); + options.storedValues(storedValue); + String[] optimizeTopK = new String[]{"BM25(@doc) DESC", "TFIDF(@doc) DESC"}; + options.optimizeTopK(optimizeTopK); + + final ArangoSearchAsync view = db.arangoSearch(viewName); + view.create(options).get(); + + final ArangoSearchPropertiesEntity properties = view.getProperties().get(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(properties.getLinks()).isNotEmpty(); + + CollectionLink createdLink = properties.getLinks().iterator().next(); + assertThat(createdLink.getName()).isEqualTo(COLL_1); + assertThat(createdLink.getAnalyzers()).contains("identity"); + assertThat(createdLink.getIncludeAllFields()).isTrue(); + assertThat(createdLink.getStoreValues()).isEqualTo(StoreValuesType.ID); + assertThat(createdLink.getTrackListPositions()).isFalse(); + + FieldLink fieldLink = createdLink.getFields().iterator().next(); + if (isEnterprise()) { + assertThat(createdLink.getCache()).isTrue(); + assertThat(fieldLink.getCache()).isFalse(); + assertThat(properties.getPrimaryKeyCache()).isTrue(); + 
assertThat(properties.getPrimarySortCache()).isTrue(); + assertThat(properties.getStoredValues()) + .isNotEmpty() + .allSatisfy(it -> assertThat(it.getCache()).isTrue()); + } + + if (isEnterprise() && isAtLeastVersion(3, 10)) { + assertThat(createdLink.getNested()).isNotEmpty(); + FieldLink nested = createdLink.getNested().iterator().next(); + assertThat(nested.getName()).isEqualTo("f3"); + } + + assertThat(fieldLink.getName()).isEqualTo("f1"); + if (isEnterprise() && isAtLeastVersion(3, 10)) { + assertThat(fieldLink.getNested()).isNotEmpty(); + FieldLink nested = fieldLink.getNested().iterator().next(); + assertThat(nested.getName()).isEqualTo("f2"); + } + + if (isEnterprise() && isAtLeastVersion(3, 12)) { + assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); + } + + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void pipelineAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + // comma delimiter + DelimiterAnalyzerProperties commaDelimiterProperties = new DelimiterAnalyzerProperties(); + commaDelimiterProperties.setDelimiter(","); + + DelimiterAnalyzer commaDelimiter = new DelimiterAnalyzer(); + commaDelimiter.setProperties(commaDelimiterProperties); + + // semicolon delimiter + DelimiterAnalyzerProperties semicolonDelimiterProperties = new DelimiterAnalyzerProperties(); + semicolonDelimiterProperties.setDelimiter(";"); + + DelimiterAnalyzer semicolonDelimiter = new DelimiterAnalyzer(); + semicolonDelimiter.setProperties(semicolonDelimiterProperties); + + // stem + StemAnalyzerProperties stemAnalyzerProperties = new StemAnalyzerProperties(); + stemAnalyzerProperties.setLocale("en"); + + StemAnalyzer stemAnalyzer = new StemAnalyzer(); + stemAnalyzer.setProperties(stemAnalyzerProperties); + + // pipeline analyzer + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); 
+ + PipelineAnalyzerProperties properties = new PipelineAnalyzerProperties() + .addAnalyzer(commaDelimiter) + .addAnalyzer(semicolonDelimiter) + .addAnalyzer(stemAnalyzer); + + PipelineAnalyzer pipelineAnalyzer = new PipelineAnalyzer(); + pipelineAnalyzer.setName("test-" + UUID.randomUUID()); + pipelineAnalyzer.setProperties(properties); + pipelineAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, pipelineAnalyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void stopwordsAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + StopwordsAnalyzerProperties properties = new StopwordsAnalyzerProperties() + .addStopwordAsHex("616e64") + .addStopwordAsString("the"); + + assertThat(properties.getStopwordsAsStringList()).contains("and"); + assertThat(properties.getStopwordsAsHexList()).contains("746865"); + + StopwordsAnalyzer analyzer = new StopwordsAnalyzer(); + String name = "test-" + UUID.randomUUID(); + analyzer.setName(name); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + db.createSearchAnalyzer(analyzer).get(); + Collection res = db.query("RETURN FLATTEN(TOKENS(SPLIT('the fox and the dog and a theater', ' '), " + + "@aName))", Collection.class, + Collections.singletonMap("aName", name)).get().getResult().get(0); + assertThat(res).containsExactly("fox", "dog", "a", "theater"); + db.deleteSearchAnalyzer(name).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void aqlAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + AQLAnalyzerProperties properties = new AQLAnalyzerProperties(); + properties.setBatchSize(2); + 
properties.setCollapsePositions(true); + properties.setKeepNull(false); + properties.setMemoryLimit(2200L); + properties.setQueryString("RETURN SOUNDEX(@param)"); + properties.setReturnType(AQLAnalyzerProperties.ReturnType.string); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + AQLAnalyzer aqlAnalyzer = new AQLAnalyzer(); + aqlAnalyzer.setName("test-" + UUID.randomUUID()); + aqlAnalyzer.setProperties(properties); + aqlAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, aqlAnalyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void geoJsonAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoJSONAnalyzerProperties properties = new GeoJSONAnalyzerProperties(); + properties.setOptions(options); + properties.setType(GeoJSONAnalyzerProperties.GeoJSONAnalyzerType.point); + properties.setLegacy(true); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoJSONAnalyzer geoJSONAnalyzer = new GeoJSONAnalyzer(); + geoJSONAnalyzer.setName("test-" + UUID.randomUUID()); + geoJSONAnalyzer.setProperties(properties); + geoJSONAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoJSONAnalyzer); + } + + + @ParameterizedTest + @MethodSource("asyncDbs") + void geoS2Analyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 10, 5)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoS2AnalyzerProperties properties = new 
GeoS2AnalyzerProperties(); + properties.setOptions(options); + properties.setType(GeoS2AnalyzerProperties.GeoS2AnalyzerType.point); + properties.setFormat(GeoS2AnalyzerProperties.GeoS2Format.s2Point); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoS2Analyzer geoS2Analyzer = new GeoS2Analyzer(); + geoS2Analyzer.setName("test-" + UUID.randomUUID()); + geoS2Analyzer.setProperties(properties); + geoS2Analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoS2Analyzer); + } + + + @ParameterizedTest + @MethodSource("asyncDbs") + void geoPointAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 8)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoPointAnalyzerProperties properties = new GeoPointAnalyzerProperties(); + properties.setLatitude(new String[]{"a", "b", "c"}); + properties.setLongitude(new String[]{"d", "e", "f"}); + properties.setOptions(options); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoPointAnalyzer geoPointAnalyzer = new GeoPointAnalyzer(); + geoPointAnalyzer.setName("test-" + UUID.randomUUID()); + geoPointAnalyzer.setProperties(properties); + geoPointAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoPointAnalyzer); + } + + + @ParameterizedTest + @MethodSource("asyncDbs") + void segmentationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 9)); + + SegmentationAnalyzerProperties properties = new SegmentationAnalyzerProperties(); + properties.setBreakMode(SegmentationAnalyzerProperties.BreakMode.graphic); + 
properties.setAnalyzerCase(SearchAnalyzerCase.upper); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + SegmentationAnalyzer segmentationAnalyzer = new SegmentationAnalyzer(); + segmentationAnalyzer.setName("test-" + UUID.randomUUID()); + segmentationAnalyzer.setProperties(properties); + segmentationAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, segmentationAnalyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void collationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 9)); + + CollationAnalyzerProperties properties = new CollationAnalyzerProperties(); + properties.setLocale("ru"); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + CollationAnalyzer collationAnalyzer = new CollationAnalyzer(); + collationAnalyzer.setName("test-" + UUID.randomUUID()); + collationAnalyzer.setProperties(properties); + collationAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, collationAnalyzer); + } + + + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") + @ParameterizedTest + @MethodSource("asyncDbs") + void classificationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + ClassificationAnalyzerProperties properties = new ClassificationAnalyzerProperties(); + properties.setModelLocation("/tmp/foo.bin"); + properties.setTopK(2); + properties.setThreshold(.5); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + 
ClassificationAnalyzer analyzer = new ClassificationAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") + @ParameterizedTest + @MethodSource("asyncDbs") + void nearestNeighborsAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + NearestNeighborsAnalyzerProperties properties = new NearestNeighborsAnalyzerProperties(); + properties.setModelLocation("/tmp/foo.bin"); + properties.setTopK(2); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NearestNeighborsAnalyzer analyzer = new NearestNeighborsAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void MinHashAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + SegmentationAnalyzerProperties segProperties = new SegmentationAnalyzerProperties(); + segProperties.setBreakMode(SegmentationAnalyzerProperties.BreakMode.alpha); + segProperties.setAnalyzerCase(SearchAnalyzerCase.lower); + + SegmentationAnalyzer segAnalyzer = new SegmentationAnalyzer(); + segAnalyzer.setProperties(segProperties); + + MinHashAnalyzerProperties properties = new MinHashAnalyzerProperties(); + properties.setAnalyzer(segAnalyzer); + properties.setNumHashes(2); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + 
features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + MinHashAnalyzer analyzer = new MinHashAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void WildcardAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 12)); + + NormAnalyzerProperties properties = new NormAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + + NormAnalyzer normAnalyzer = new NormAnalyzer(); + normAnalyzer.setProperties(properties); + + WildcardAnalyzerProperties wildcardProperties = new WildcardAnalyzerProperties(); + wildcardProperties.setNgramSize(3); + wildcardProperties.setAnalyzer(normAnalyzer); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.position); + + WildcardAnalyzer wildcardAnalyzer = new WildcardAnalyzer(); + wildcardAnalyzer.setName("test-" + UUID.randomUUID()); + wildcardAnalyzer.setProperties(wildcardProperties); + wildcardAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, wildcardAnalyzer); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void offsetFeature(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + features.add(AnalyzerFeature.offset); + + IdentityAnalyzer analyzer = new IdentityAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + +} diff --git 
a/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java b/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java new file mode 100644 index 000000000..e29a6907e --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java @@ -0,0 +1,1118 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.InvertedIndexField; +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.ViewType; +import com.arangodb.entity.arangosearch.*; +import com.arangodb.entity.arangosearch.analyzer.*; +import com.arangodb.model.InvertedIndexOptions; +import com.arangodb.model.arangosearch.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.DisabledIfSystemProperty; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoSearchTest extends BaseJunit5 { + + private static final String COLL_1 = "ArangoSearchTest_view_replace_prop"; + private static final String COLL_2 = 
"ArangoSearchTest_view_update_prop"; + + @BeforeAll + static void init() { + initCollections(COLL_1, COLL_2); + } + + @ParameterizedTest + @MethodSource("dbs") + void exists(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createAndExistsSearchAlias(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + db.createSearchAlias(viewName, new SearchAliasCreateOptions()); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void getInfo(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()); + final ViewEntity info = db.arangoSearch(viewName).getInfo(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + } + + @ParameterizedTest + @MethodSource("dbs") + void drop(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + db.createArangoSearch(viewName, new ArangoSearchCreateOptions()); + final ArangoView view = db.arangoSearch(viewName); + view.drop(); + assertThat(view.exists()).isFalse(); + } + + @ParameterizedTest + @MethodSource("dbs") + void rename(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final String name = viewName + "_new"; + db.createArangoSearch(name, new ArangoSearchCreateOptions()); + db.arangoSearch(name).rename(viewName); + assertThat(db.arangoSearch(name).exists()).isFalse(); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void 
createArangoSearchView(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ViewEntity info = db.arangoSearch(viewName).create(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createSearchAliasView(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + final ViewEntity info = db.searchAlias(viewName).create(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(db.searchAlias(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createArangoSearchViewWithOptions(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + final ViewEntity info = db.arangoSearch(viewName).create(options); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createArangoSearchViewWithPrimarySort(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + + final PrimarySort primarySort = PrimarySort.on("myFieldName"); + primarySort.ascending(false); + options.primarySort(primarySort); + options.primarySortCompression(ArangoSearchCompression.none); + options.consolidationIntervalMsec(666666L); + StoredValue storedValue 
= new StoredValue(Arrays.asList("a", "b"), ArangoSearchCompression.none); + options.storedValues(storedValue); + + final ArangoSearch view = db.arangoSearch(viewName); + final ViewEntity info = view.create(options); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(db.arangoSearch(viewName).exists()).isTrue(); + + if (isAtLeastVersion(3, 7)) { + final ArangoSearchPropertiesEntity properties = view.getProperties(); + assertThat(properties.getPrimarySortCompression()).isEqualTo(ArangoSearchCompression.none); + Collection retrievedStoredValues = properties.getStoredValues(); + assertThat(retrievedStoredValues).isNotNull(); + assertThat(retrievedStoredValues).hasSize(1); + StoredValue retrievedStoredValue = retrievedStoredValues.iterator().next(); + assertThat(retrievedStoredValue).isNotNull(); + assertThat(retrievedStoredValue.getFields()).isEqualTo(storedValue.getFields()); + assertThat(retrievedStoredValue.getCompression()).isEqualTo(storedValue.getCompression()); + assertThat(properties.getPrimarySort()) + .hasSize(1) + .allSatisfy(ps -> { + assertThat(ps).isNotNull(); + assertThat(ps.getField()).isEqualTo(primarySort.getField()); + assertThat(ps.getAscending()).isEqualTo(primarySort.getAscending()); + }); + } + } + + @ParameterizedTest + @MethodSource("dbs") + void createArangoSearchViewWithCommitIntervalMsec(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + String viewName = rndName(); + final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); + options.commitIntervalMsec(666666L); + + final ViewEntity info = db.arangoSearch(viewName).create(options); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + 
assertThat(db.arangoSearch(viewName).exists()).isTrue(); + + // check commit interval msec property + final ArangoSearch view = db.arangoSearch(viewName); + final ArangoSearchPropertiesEntity properties = view.getProperties(); + assertThat(properties.getCommitIntervalMsec()).isEqualTo(666666L); + } + + @ParameterizedTest + @MethodSource("dbs") + void createSearchAliasViewWithOptions(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions(); + final ViewEntity info = db.searchAlias(viewName).create(options); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(db.searchAlias(viewName).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createSearchAliasViewWithIndexesAndGetProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollection col = db.collection(COLL_1); + String idxName1 = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName1) + .fields(new InvertedIndexField().name("a" + rnd()))); + + String idxName2 = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes( + new SearchAliasIndex(COLL_1, idxName1, SearchAliasIndex.OperationType.add), + new SearchAliasIndex(COLL_1, idxName2, SearchAliasIndex.OperationType.add), + new SearchAliasIndex(COLL_1, idxName2, SearchAliasIndex.OperationType.del) + ); + final ViewEntity info = db.searchAlias(viewName).create(options); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + 
assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .anyMatch(i -> i.getCollection().equals(COLL_1) && i.getIndex().equals(idxName1)); + } + + @ParameterizedTest + @MethodSource("dbs") + void getArangoSearchViewProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearch view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()); + final ArangoSearchPropertiesEntity properties = view.getProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(properties.getConsolidationIntervalMsec()).isNotNull(); + assertThat(properties.getCleanupIntervalStep()).isNotNull(); + final ConsolidationPolicy consolidate = properties.getConsolidationPolicy(); + assertThat(consolidate).isNotNull(); + final Collection links = properties.getLinks(); + assertThat(links).isEmpty(); + } + + @ParameterizedTest + @MethodSource("dbs") + void updateArangoSearchViewProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearch view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()); + final ArangoSearchPropertiesOptions options = new ArangoSearchPropertiesOptions(); + options.cleanupIntervalStep(15L); + options.consolidationIntervalMsec(65000L); + options.consolidationPolicy(ConsolidationPolicy.of(ConsolidationType.BYTES_ACCUM).threshold(1.)); + options.link(CollectionLink.on(COLL_2) 
+ .fields(FieldLink.on("value").analyzers("identity").trackListPositions(true).includeAllFields(true) + .storeValues(StoreValuesType.ID))); + final ArangoSearchPropertiesEntity properties = view.updateProperties(options); + assertThat(properties).isNotNull(); + assertThat(properties.getCleanupIntervalStep()).isEqualTo(15L); + assertThat(properties.getConsolidationIntervalMsec()).isEqualTo(65000L); + final ConsolidationPolicy consolidate = properties.getConsolidationPolicy(); + assertThat(consolidate).isNotNull(); + assertThat(consolidate.getType()).isEqualTo(ConsolidationType.BYTES_ACCUM); + assertThat(consolidate.getThreshold()).isEqualTo(1.); + assertThat(properties.getLinks()).hasSize(1); + final CollectionLink link = properties.getLinks().iterator().next(); + assertThat(link.getName()).isEqualTo(COLL_2); + assertThat(link.getFields()).hasSize(1); + final FieldLink next = link.getFields().iterator().next(); + assertThat(next.getName()).isEqualTo("value"); + assertThat(next.getIncludeAllFields()).isTrue(); + assertThat(next.getTrackListPositions()).isTrue(); + assertThat(next.getStoreValues()).isEqualTo(StoreValuesType.ID); + } + + @ParameterizedTest + @MethodSource("dbs") + void updateSearchAliasViewWithIndexesAndGetProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollection col = db.collection(COLL_1); + String idxName = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName) + .fields(new InvertedIndexField().name("a" + rnd()))); + ArangoCollection col2 = db.collection(COLL_2); + String idxName2 = rndName(); + col2.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes(new SearchAliasIndex(COLL_1, idxName)); + final ViewEntity info = db.searchAlias(viewName).create(options); + db.searchAlias(viewName).updateProperties(new 
SearchAliasPropertiesOptions() + .indexes(new SearchAliasIndex(COLL_2, idxName2))); + + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .hasSize(2) + .anyMatch(i -> i.getCollection().equals(COLL_1) && i.getIndex().equals(idxName)) + .anyMatch(i -> i.getCollection().equals(COLL_2) && i.getIndex().equals(idxName2)); + } + + @ParameterizedTest + @MethodSource("dbs") + void replaceArangoSearchViewProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + final ArangoSearch view = db.arangoSearch(viewName); + view.create(new ArangoSearchCreateOptions()); + final ArangoSearchPropertiesOptions options = new ArangoSearchPropertiesOptions(); + options.link(CollectionLink.on(COLL_1) + .fields(FieldLink.on("value").analyzers("identity"))); + final ArangoSearchPropertiesEntity properties = view.replaceProperties(options); + assertThat(properties).isNotNull(); + assertThat(properties.getLinks()).hasSize(1); + final CollectionLink link = properties.getLinks().iterator().next(); + assertThat(link.getName()).isEqualTo(COLL_1); + assertThat(link.getFields()).hasSize(1); + assertThat(link.getFields().iterator().next().getName()).isEqualTo("value"); + } + + @ParameterizedTest + @MethodSource("dbs") + void replaceSearchAliasViewWithIndexesAndGetProperties(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + ArangoCollection col = db.collection(COLL_1); + String idxName = rndName(); + col.ensureInvertedIndex(new InvertedIndexOptions() 
+ .name(idxName) + .fields(new InvertedIndexField().name("a" + rnd()))); + ArangoCollection col2 = db.collection(COLL_2); + String idxName2 = rndName(); + col2.ensureInvertedIndex(new InvertedIndexOptions() + .name(idxName2) + .fields(new InvertedIndexField().name("a" + rnd()))); + + String viewName = rndName(); + final SearchAliasCreateOptions options = new SearchAliasCreateOptions() + .indexes(new SearchAliasIndex(COLL_1, idxName)); + final ViewEntity info = db.searchAlias(viewName).create(options); + db.searchAlias(viewName).replaceProperties(new SearchAliasPropertiesOptions() + .indexes(new SearchAliasIndex(COLL_2, idxName2))); + + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(viewName); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + + final SearchAliasPropertiesEntity properties = db.searchAlias(viewName).getProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + assertThat(properties.getIndexes()) + .isNotNull() + .isNotEmpty() + .hasSize(1) + .anyMatch(i -> i.getCollection().equals(COLL_2) && i.getIndex().equals(idxName2)); + } + + private void createGetAndDeleteTypedAnalyzer(ArangoDatabase db, SearchAnalyzer analyzer) { + + String fullyQualifiedName = db.name() + "::" + analyzer.getName(); + analyzer.setName(fullyQualifiedName); + + // createAnalyzer + SearchAnalyzer createdAnalyzer = db.createSearchAnalyzer(analyzer); + assertThat(createdAnalyzer).isEqualTo(analyzer); + + // getAnalyzer + SearchAnalyzer gotAnalyzer = db.getSearchAnalyzer(analyzer.getName()); + assertThat(gotAnalyzer).isEqualTo(analyzer); + + // getAnalyzers + SearchAnalyzer foundAnalyzer = + db.getSearchAnalyzers().stream().filter(it -> it.getName().equals(fullyQualifiedName)) + .findFirst().get(); + 
assertThat(foundAnalyzer).isEqualTo(analyzer); + + // deleteAnalyzer + AnalyzerDeleteOptions deleteOptions = new AnalyzerDeleteOptions(); + deleteOptions.setForce(true); + + db.deleteSearchAnalyzer(analyzer.getName(), deleteOptions); + + Throwable thrown = catchThrowable(() -> db.getSearchAnalyzer(analyzer.getName())); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(404); + assertThat(e.getErrorNum()).isEqualTo(1202); + } + + @ParameterizedTest + @MethodSource("dbs") + void identityAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + IdentityAnalyzer analyzer = new IdentityAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void delimiterAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + DelimiterAnalyzerProperties properties = new DelimiterAnalyzerProperties(); + properties.setDelimiter("-"); + + DelimiterAnalyzer analyzer = new DelimiterAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void multiDelimiterAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 12)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + 
features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + MultiDelimiterAnalyzerProperties properties = new MultiDelimiterAnalyzerProperties(); + properties.setDelimiters("-", ",", "..."); + + MultiDelimiterAnalyzer analyzer = new MultiDelimiterAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void stemAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + StemAnalyzerProperties properties = new StemAnalyzerProperties(); + properties.setLocale("ru"); + + StemAnalyzer options = new StemAnalyzer(); + options.setFeatures(features); + options.setName(name); + options.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, options); + } + + @ParameterizedTest + @MethodSource("dbs") + void normAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NormAnalyzerProperties properties = new NormAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + + NormAnalyzer options = new NormAnalyzer(); + options.setFeatures(features); + options.setName(name); + options.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, options); + } + + @ParameterizedTest + @MethodSource("dbs") + void ngramAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + 
features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NGramAnalyzerProperties properties = new NGramAnalyzerProperties(); + properties.setMax(6L); + properties.setMin(3L); + properties.setPreserveOriginal(true); + + NGramAnalyzer analyzer = new NGramAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setType(AnalyzerType.ngram); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void enhancedNgramAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 6)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NGramAnalyzerProperties properties = new NGramAnalyzerProperties(); + properties.setMax(6L); + properties.setMin(3L); + properties.setPreserveOriginal(true); + properties.setStartMarker("^"); + properties.setEndMarker("^"); + properties.setStreamType(StreamType.utf8); + + NGramAnalyzer analyzer = new NGramAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void textAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 5)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + TextAnalyzerProperties properties = new TextAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + properties.setStemming(true); + + TextAnalyzer analyzer = new TextAnalyzer(); + analyzer.setFeatures(features); + 
analyzer.setName(name); + analyzer.setType(AnalyzerType.text); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void enhancedTextAnalyzerTyped(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 6)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + EdgeNgram edgeNgram = new EdgeNgram(); + edgeNgram.setMin(2L); + edgeNgram.setMax(100000L); + edgeNgram.setPreserveOriginal(true); + + TextAnalyzerProperties properties = new TextAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + properties.setStemming(true); + properties.setEdgeNgram(edgeNgram); + + TextAnalyzer analyzer = new TextAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + analyzer.setProperties(properties); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void arangoSearchOptions(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 4)); + String viewName = rndName(); + FieldLink field = FieldLink.on("f1") + .inBackground(true) + .cache(false); + if (isEnterprise()) { + field.nested(FieldLink.on("f2")); + } + CollectionLink link = CollectionLink.on(COLL_1) + .analyzers("identity") + .fields(field) + .includeAllFields(true) + .storeValues(StoreValuesType.ID) + .trackListPositions(false) + .inBackground(true) + .cache(true); + + if (isEnterprise()) { + link.nested(FieldLink.on("f3")); + } + ArangoSearchCreateOptions options = new ArangoSearchCreateOptions() + .link(link) + .primarySortCache(true) + .primaryKeyCache(true); + StoredValue storedValue = new StoredValue(Arrays.asList("a", "b"), ArangoSearchCompression.none, true); + options.storedValues(storedValue); + String[] optimizeTopK = new 
String[]{"BM25(@doc) DESC", "TFIDF(@doc) DESC"}; + options.optimizeTopK(optimizeTopK); + + final ArangoSearch view = db.arangoSearch(viewName); + view.create(options); + + final ArangoSearchPropertiesEntity properties = view.getProperties(); + assertThat(properties).isNotNull(); + assertThat(properties.getId()).isNotNull(); + assertThat(properties.getName()).isEqualTo(viewName); + assertThat(properties.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + assertThat(properties.getLinks()).isNotEmpty(); + + CollectionLink createdLink = properties.getLinks().iterator().next(); + assertThat(createdLink.getName()).isEqualTo(COLL_1); + assertThat(createdLink.getAnalyzers()).contains("identity"); + assertThat(createdLink.getIncludeAllFields()).isTrue(); + assertThat(createdLink.getStoreValues()).isEqualTo(StoreValuesType.ID); + assertThat(createdLink.getTrackListPositions()).isFalse(); + + FieldLink fieldLink = createdLink.getFields().iterator().next(); + if (isEnterprise()) { + assertThat(createdLink.getCache()).isTrue(); + assertThat(fieldLink.getCache()).isFalse(); + assertThat(properties.getPrimaryKeyCache()).isTrue(); + assertThat(properties.getPrimarySortCache()).isTrue(); + assertThat(properties.getStoredValues()) + .isNotEmpty() + .allSatisfy(it -> assertThat(it.getCache()).isTrue()); + } + + if (isEnterprise() && isAtLeastVersion(3, 10)) { + assertThat(createdLink.getNested()).isNotEmpty(); + FieldLink nested = createdLink.getNested().iterator().next(); + assertThat(nested.getName()).isEqualTo("f3"); + } + + assertThat(fieldLink.getName()).isEqualTo("f1"); + if (isEnterprise() && isAtLeastVersion(3, 10)) { + assertThat(fieldLink.getNested()).isNotEmpty(); + FieldLink nested = fieldLink.getNested().iterator().next(); + assertThat(nested.getName()).isEqualTo("f2"); + } + + if (isEnterprise() && isAtLeastVersion(3, 12)) { + assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); + } + + } + + @ParameterizedTest + @MethodSource("dbs") + void 
pipelineAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 8)); + + // comma delimiter + DelimiterAnalyzerProperties commaDelimiterProperties = new DelimiterAnalyzerProperties(); + commaDelimiterProperties.setDelimiter(","); + + DelimiterAnalyzer commaDelimiter = new DelimiterAnalyzer(); + commaDelimiter.setProperties(commaDelimiterProperties); + + // semicolon delimiter + DelimiterAnalyzerProperties semicolonDelimiterProperties = new DelimiterAnalyzerProperties(); + semicolonDelimiterProperties.setDelimiter(","); + + DelimiterAnalyzer semicolonDelimiter = new DelimiterAnalyzer(); + semicolonDelimiter.setProperties(semicolonDelimiterProperties); + + // stem + StemAnalyzerProperties stemAnalyzerProperties = new StemAnalyzerProperties(); + stemAnalyzerProperties.setLocale("en"); + + StemAnalyzer stemAnalyzer = new StemAnalyzer(); + stemAnalyzer.setProperties(stemAnalyzerProperties); + + // pipeline analyzer + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + PipelineAnalyzerProperties properties = new PipelineAnalyzerProperties() + .addAnalyzer(commaDelimiter) + .addAnalyzer(semicolonDelimiter) + .addAnalyzer(stemAnalyzer); + + PipelineAnalyzer pipelineAnalyzer = new PipelineAnalyzer(); + pipelineAnalyzer.setName("test-" + UUID.randomUUID()); + pipelineAnalyzer.setProperties(properties); + pipelineAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, pipelineAnalyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void stopwordsAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 8)); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + StopwordsAnalyzerProperties properties = new StopwordsAnalyzerProperties() + .addStopwordAsHex("616e64") + .addStopwordAsString("the"); + + 
assertThat(properties.getStopwordsAsStringList()).contains("and"); + assertThat(properties.getStopwordsAsHexList()).contains("746865"); + + StopwordsAnalyzer analyzer = new StopwordsAnalyzer(); + String name = "test-" + UUID.randomUUID(); + analyzer.setName(name); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + db.createSearchAnalyzer(analyzer); + Collection res = db.query("RETURN FLATTEN(TOKENS(SPLIT('the fox and the dog and a theater', ' '), " + + "@aName))", Collection.class, + Collections.singletonMap("aName", name)).next(); + assertThat(res).containsExactly("fox", "dog", "a", "theater"); + db.deleteSearchAnalyzer(name); + } + + @ParameterizedTest + @MethodSource("dbs") + void aqlAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 8)); + + AQLAnalyzerProperties properties = new AQLAnalyzerProperties(); + properties.setBatchSize(2); + properties.setCollapsePositions(true); + properties.setKeepNull(false); + properties.setMemoryLimit(2200L); + properties.setQueryString("RETURN SOUNDEX(@param)"); + properties.setReturnType(AQLAnalyzerProperties.ReturnType.string); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + AQLAnalyzer aqlAnalyzer = new AQLAnalyzer(); + aqlAnalyzer.setName("test-" + UUID.randomUUID()); + aqlAnalyzer.setProperties(properties); + aqlAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, aqlAnalyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void geoJsonAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 8)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoJSONAnalyzerProperties properties = new GeoJSONAnalyzerProperties(); + properties.setOptions(options); + 
properties.setType(GeoJSONAnalyzerProperties.GeoJSONAnalyzerType.point); + properties.setLegacy(true); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoJSONAnalyzer geoJSONAnalyzer = new GeoJSONAnalyzer(); + geoJSONAnalyzer.setName("test-" + UUID.randomUUID()); + geoJSONAnalyzer.setProperties(properties); + geoJSONAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoJSONAnalyzer); + } + + + @ParameterizedTest + @MethodSource("dbs") + void geoS2Analyzer(ArangoDatabase db) { + assumeTrue(isEnterprise()); + assumeTrue(isAtLeastVersion(3, 10, 5)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoS2AnalyzerProperties properties = new GeoS2AnalyzerProperties(); + properties.setOptions(options); + properties.setType(GeoS2AnalyzerProperties.GeoS2AnalyzerType.point); + properties.setFormat(GeoS2AnalyzerProperties.GeoS2Format.s2Point); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoS2Analyzer geoS2Analyzer = new GeoS2Analyzer(); + geoS2Analyzer.setName("test-" + UUID.randomUUID()); + geoS2Analyzer.setProperties(properties); + geoS2Analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoS2Analyzer); + } + + + @ParameterizedTest + @MethodSource("dbs") + void geoPointAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 8)); + + GeoAnalyzerOptions options = new GeoAnalyzerOptions(); + options.setMaxLevel(10); + options.setMaxCells(11); + options.setMinLevel(8); + + GeoPointAnalyzerProperties properties = new GeoPointAnalyzerProperties(); + properties.setLatitude(new String[]{"a", "b", "c"}); + properties.setLongitude(new String[]{"d", "e", "f"}); + properties.setOptions(options); + + Set features = new 
HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + GeoPointAnalyzer geoPointAnalyzer = new GeoPointAnalyzer(); + geoPointAnalyzer.setName("test-" + UUID.randomUUID()); + geoPointAnalyzer.setProperties(properties); + geoPointAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, geoPointAnalyzer); + } + + + @ParameterizedTest + @MethodSource("dbs") + void segmentationAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 9)); + + SegmentationAnalyzerProperties properties = new SegmentationAnalyzerProperties(); + properties.setBreakMode(SegmentationAnalyzerProperties.BreakMode.graphic); + properties.setAnalyzerCase(SearchAnalyzerCase.upper); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + SegmentationAnalyzer segmentationAnalyzer = new SegmentationAnalyzer(); + segmentationAnalyzer.setName("test-" + UUID.randomUUID()); + segmentationAnalyzer.setProperties(properties); + segmentationAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, segmentationAnalyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void collationAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 9)); + + CollationAnalyzerProperties properties = new CollationAnalyzerProperties(); + properties.setLocale("ru"); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + CollationAnalyzer collationAnalyzer = new CollationAnalyzer(); + collationAnalyzer.setName("test-" + UUID.randomUUID()); + collationAnalyzer.setProperties(properties); + collationAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, collationAnalyzer); + } + + + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason 
= "Test requires server with analyzer model located at `/tmp/foo.bin`") + @ParameterizedTest + @MethodSource("dbs") + void classificationAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + ClassificationAnalyzerProperties properties = new ClassificationAnalyzerProperties(); + properties.setModelLocation("/tmp/foo.bin"); + properties.setTopK(2); + properties.setThreshold(.5); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + ClassificationAnalyzer analyzer = new ClassificationAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") + @ParameterizedTest + @MethodSource("dbs") + void nearestNeighborsAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + NearestNeighborsAnalyzerProperties properties = new NearestNeighborsAnalyzerProperties(); + properties.setModelLocation("/tmp/foo.bin"); + properties.setTopK(2); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + NearestNeighborsAnalyzer analyzer = new NearestNeighborsAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void MinHashAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + assumeTrue(isEnterprise()); + + SegmentationAnalyzerProperties segProperties = new SegmentationAnalyzerProperties(); + 
segProperties.setBreakMode(SegmentationAnalyzerProperties.BreakMode.alpha); + segProperties.setAnalyzerCase(SearchAnalyzerCase.lower); + + SegmentationAnalyzer segAnalyzer = new SegmentationAnalyzer(); + segAnalyzer.setProperties(segProperties); + + MinHashAnalyzerProperties properties = new MinHashAnalyzerProperties(); + properties.setAnalyzer(segAnalyzer); + properties.setNumHashes(2); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + MinHashAnalyzer analyzer = new MinHashAnalyzer(); + analyzer.setName("test-" + UUID.randomUUID()); + analyzer.setProperties(properties); + analyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void WildcardAnalyzer(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 12)); + + NormAnalyzerProperties properties = new NormAnalyzerProperties(); + properties.setLocale("ru"); + properties.setAnalyzerCase(SearchAnalyzerCase.lower); + properties.setAccent(true); + + NormAnalyzer normAnalyzer = new NormAnalyzer(); + normAnalyzer.setProperties(properties); + + WildcardAnalyzerProperties wildcardProperties = new WildcardAnalyzerProperties(); + wildcardProperties.setNgramSize(3); + wildcardProperties.setAnalyzer(normAnalyzer); + + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.position); + + WildcardAnalyzer wildcardAnalyzer = new WildcardAnalyzer(); + wildcardAnalyzer.setName("test-" + UUID.randomUUID()); + wildcardAnalyzer.setProperties(wildcardProperties); + wildcardAnalyzer.setFeatures(features); + + createGetAndDeleteTypedAnalyzer(db, wildcardAnalyzer); + } + + @ParameterizedTest + @MethodSource("dbs") + void offsetFeature(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + + String name = "test-" + UUID.randomUUID(); + + Set features = new HashSet<>(); + 
features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + features.add(AnalyzerFeature.offset); + + IdentityAnalyzer analyzer = new IdentityAnalyzer(); + analyzer.setFeatures(features); + analyzer.setName(name); + + createGetAndDeleteTypedAnalyzer(db, analyzer); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionAsyncTest.java new file mode 100644 index 000000000..f7fc0bee8 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionAsyncTest.java @@ -0,0 +1,495 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; +import com.arangodb.util.RawJson; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoVertexCollectionAsyncTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "ArangoVertexCollectionTest_graph"; + private static final String COLLECTION_NAME = rndName(); + + private static Stream asyncVertices() { + return asyncDbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + initGraph( + GRAPH_NAME, + null, + new GraphCreateOptions().orphanCollections(COLLECTION_NAME) + ); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void dropVertexCollection(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + ArangoGraphAsync graph = vertices.graph(); + vertices.remove().get(); + final Collection vertexCollections = graph.getVertexCollections().get(); + assertThat(vertexCollections).isEmpty(); + assertThat(graph.db().collection(COLLECTION_NAME).exists().get()).isTrue(); + + // revert + graph.addVertexCollection(COLLECTION_NAME).get(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void 
dropVertexCollectionDropCollectionTrue(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + ArangoGraphAsync graph = vertices.graph(); + vertices.remove(new VertexCollectionRemoveOptions().dropCollection(true)).get(); + final Collection vertexCollections = graph.getVertexCollections().get(); + assertThat(vertexCollections).isEmpty(); + assertThat(graph.db().collection(COLLECTION_NAME).exists().get()).isFalse(); + + // revert + initCollections(COLLECTION_NAME); + graph.addVertexCollection(COLLECTION_NAME).get(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void insertVertex(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + assertThat(vertex).isNotNull(); + ArangoCollectionAsync collection = vertices.graph().db().collection(vertices.name()); + final BaseDocument document = collection + .getDocument(vertex.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void insertVertexViolatingUniqueConstraint(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + ArangoCollectionAsync collection = vertices.graph().db().collection(vertices.name()); + collection + .ensurePersistentIndex(Collections.singletonList("field"), + new PersistentIndexOptions().unique(true).sparse(true)).get(); + + VertexEntity inserted = vertices.insertVertex(RawJson.of("{\"field\": 99}")).get(); + + Throwable thrown = catchThrowable(() -> vertices.insertVertex(RawJson.of("{\"field\": 99}")).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + + // revert + 
vertices.deleteVertex(inserted.getKey()).get(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void duplicateInsertSameObjectVertex(ArangoVertexCollectionAsync vertices) { + + // ######################################################### + // Create a new BaseDocument + // ######################################################### + + UUID uuid = UUID.randomUUID(); + BaseDocument bd = new BaseDocument(UUID.randomUUID().toString()); + bd.setKey(uuid.toString()); + bd.addAttribute("name", "Paul"); + + vertices.insertVertex(bd); + + UUID uuid2 = UUID.randomUUID(); + BaseDocument bd2 = new BaseDocument(UUID.randomUUID().toString()); + bd2.setKey(uuid2.toString()); + bd2.addAttribute("name", "Paul"); + + vertices.insertVertex(bd2); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void insertVertexUpdateRev(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity vertex = vertices.insertVertex(doc, null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(vertex.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertex(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + final BaseDocument document = vertices + .getVertex(vertex.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertexIfMatch(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch(vertex.getRev()); + final BaseDocument document 
= vertices + .getVertex(vertex.getKey(), BaseDocument.class, options).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertexIfMatchFail(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch("no"); + final BaseDocument vertex2 = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options).get(); + assertThat(vertex2).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertexIfNoneMatch(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch("no"); + final BaseDocument document = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertexIfNoneMatchFail(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null).get(); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch(vertex.getRev()); + final BaseDocument vertex2 = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options).get(); + assertThat(vertex2).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void replaceVertex(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + 
doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, null).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void replaceVertexUpdateRev(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void replaceVertexIfMatch(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + 
doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch(createResult.getRev()); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, options).get(); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void replaceVertexIfMatchFail(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> vertices.replaceVertex(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertex(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { 
+ final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, null).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertexUpdateRev(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.addAttribute("foo", "bar"); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, null).get(); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void 
updateVertexIfMatch(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final VertexUpdateOptions options = new VertexUpdateOptions().ifMatch(createResult.getRev()); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertexIfMatchFail(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final 
VertexUpdateOptions options = new VertexUpdateOptions().ifMatch("no"); + + Throwable thrown = catchThrowable(() -> vertices.updateVertex(createResult.getKey(), doc, options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertexKeepNullTrue(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.updateAttribute("a", null); + final VertexUpdateOptions options = new VertexUpdateOptions().keepNull(true); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties().keySet()).hasSize(4); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertexKeepNullFalse(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + doc.updateAttribute("a", null); + final VertexUpdateOptions options = new 
VertexUpdateOptions().keepNull(false); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options).get(); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void deleteVertex(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + vertices.deleteVertex(createResult.getKey(), null).get(); + final BaseDocument vertex = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(vertex).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void deleteVertexIfMatch(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch(createResult.getRev()); + vertices.deleteVertex(createResult.getKey(), options).get(); + final BaseDocument vertex = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null).get(); + assertThat(vertex).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void 
deleteVertexIfMatchFail(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null).get(); + final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> vertices.deleteVertex(createResult.getKey(), options).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void vertexKeyWithSpecialChars(ArangoVertexCollectionAsync vertices) throws ExecutionException, InterruptedException { + final String key = "_-:.@()+,=;$!*'%" + UUID.randomUUID(); + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(key), null).get(); + assertThat(vertex).isNotNull(); + ArangoCollectionAsync collection = vertices.graph().db().collection(vertices.name()); + final BaseDocument document = collection + .getDocument(vertex.getKey(), BaseDocument.class, null).get(); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(key); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionTest.java b/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionTest.java new file mode 100644 index 000000000..af184a5e6 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoVertexCollectionTest.java @@ -0,0 +1,495 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.VertexEntity; +import com.arangodb.entity.VertexUpdateEntity; +import com.arangodb.model.*; +import com.arangodb.util.RawJson; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoVertexCollectionTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "ArangoVertexCollectionTest_graph"; + private static final String COLLECTION_NAME = rndName(); + + private static Stream vertices() { + return dbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME).vertexCollection(COLLECTION_NAME))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + initGraph( + GRAPH_NAME, + null, + new GraphCreateOptions().orphanCollections(COLLECTION_NAME) + ); + } + + @ParameterizedTest + @MethodSource("vertices") + void dropVertexCollection(ArangoVertexCollection vertices) { + ArangoGraph graph = vertices.graph(); + vertices.remove(); + final Collection vertexCollections = 
graph.getVertexCollections(); + assertThat(vertexCollections).isEmpty(); + assertThat(graph.db().collection(COLLECTION_NAME).exists()).isTrue(); + + // revert + graph.addVertexCollection(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("vertices") + void dropVertexCollectionDropCollectionTrue(ArangoVertexCollection vertices) { + ArangoGraph graph = vertices.graph(); + vertices.remove(new VertexCollectionRemoveOptions().dropCollection(true)); + final Collection vertexCollections = graph.getVertexCollections(); + assertThat(vertexCollections).isEmpty(); + assertThat(graph.db().collection(COLLECTION_NAME).exists()).isFalse(); + + // revert + initCollections(COLLECTION_NAME); + graph.addVertexCollection(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("vertices") + void insertVertex(ArangoVertexCollection vertices) { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null); + assertThat(vertex).isNotNull(); + ArangoCollection collection = vertices.graph().db().collection(vertices.name()); + final BaseDocument document = collection + .getDocument(vertex.getKey(), BaseDocument.class, null); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("vertices") + void insertVertexViolatingUniqueConstraint(ArangoVertexCollection vertices) { + ArangoCollection collection = vertices.graph().db().collection(vertices.name()); + collection + .ensurePersistentIndex(Collections.singletonList("field"), + new PersistentIndexOptions().unique(true).sparse(true)); + + VertexEntity inserted = vertices.insertVertex(RawJson.of("{\"field\": 99}")); + + try { + vertices.insertVertex(RawJson.of("{\"field\": 99}")); + } catch (ArangoDBException e) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1210); + } + + // revert + vertices.deleteVertex(inserted.getKey()); + } + + @ParameterizedTest + @MethodSource("vertices") + void 
duplicateInsertSameObjectVertex(ArangoVertexCollection vertices) { + + // ######################################################### + // Create a new BaseDocument + // ######################################################### + + UUID uuid = UUID.randomUUID(); + BaseDocument bd = new BaseDocument(UUID.randomUUID().toString()); + bd.setKey(uuid.toString()); + bd.addAttribute("name", "Paul"); + + vertices.insertVertex(bd); + + UUID uuid2 = UUID.randomUUID(); + BaseDocument bd2 = new BaseDocument(UUID.randomUUID().toString()); + bd2.setKey(uuid2.toString()); + bd2.addAttribute("name", "Paul"); + + vertices.insertVertex(bd2); + } + + @ParameterizedTest + @MethodSource("vertices") + void insertVertexUpdateRev(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity vertex = vertices.insertVertex(doc, null); + assertThat(doc.getRevision()).isNull(); + assertThat(vertex.getRev()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertex(ArangoVertexCollection vertices) { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null); + final BaseDocument document = vertices + .getVertex(vertex.getKey(), BaseDocument.class, null); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertexIfMatch(ArangoVertexCollection vertices) { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch(vertex.getRev()); + final BaseDocument document = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertexIfMatchFail(ArangoVertexCollection vertices) { + final VertexEntity vertex = 
vertices + .insertVertex(new BaseDocument(), null); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifMatch("no"); + final BaseDocument vertex2 = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options); + assertThat(vertex2).isNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertexIfNoneMatch(ArangoVertexCollection vertices) { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch("no"); + final BaseDocument document = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(vertex.getKey()); + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertexIfNoneMatchFail(ArangoVertexCollection vertices) { + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(), null); + final GraphDocumentReadOptions options = new GraphDocumentReadOptions().ifNoneMatch(vertex.getRev()); + final BaseDocument vertex2 = vertices + .getVertex(vertex.getKey(), BaseDocument.class, options); + assertThat(vertex2).isNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void replaceVertex(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, null); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + 
.getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("vertices") + void replaceVertexUpdateRev(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, null); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(replaceResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("vertices") + void replaceVertexIfMatch(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch(createResult.getRev()); + final VertexUpdateEntity replaceResult = vertices + .replaceVertex(createResult.getKey(), doc, options); + assertThat(replaceResult).isNotNull(); + assertThat(replaceResult.getId()).isEqualTo(createResult.getId()); + assertThat(replaceResult.getRev()).isNotEqualTo(replaceResult.getOldRev()); + assertThat(replaceResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + 
assertThat(readResult.getRevision()).isEqualTo(replaceResult.getRev()); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + } + + @ParameterizedTest + @MethodSource("vertices") + void replaceVertexIfMatchFail(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.removeAttribute("a"); + doc.addAttribute("b", "test"); + final VertexReplaceOptions options = new VertexReplaceOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> vertices.replaceVertex(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertex(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, null); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + 
assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertexUpdateRev(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.addAttribute("foo", "bar"); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, null); + assertThat(doc.getRevision()).isNull(); + assertThat(createResult.getRev()).isNotNull(); + assertThat(updateResult.getRev()) + .isNotNull() + .isNotEqualTo(createResult.getRev()); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertexIfMatch(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final VertexUpdateOptions options = new VertexUpdateOptions().ifMatch(createResult.getRev()); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + 
assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getAttribute("a")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("a"))).isEqualTo("test1"); + assertThat(readResult.getAttribute("b")).isNotNull(); + assertThat(String.valueOf(readResult.getAttribute("b"))).isEqualTo("test"); + assertThat(readResult.getRevision()).isEqualTo(updateResult.getRev()); + assertThat(readResult.getProperties()).containsKey("c"); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertexIfMatchFail(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + doc.addAttribute("c", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.updateAttribute("a", "test1"); + doc.addAttribute("b", "test"); + doc.updateAttribute("c", null); + final VertexUpdateOptions options = new VertexUpdateOptions().ifMatch("no"); + + Throwable thrown = catchThrowable(() -> vertices.updateVertex(createResult.getKey(), doc, options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertexKeepNullTrue(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.updateAttribute("a", null); + final VertexUpdateOptions options = new VertexUpdateOptions().keepNull(true); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + 
assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getProperties().keySet()).hasSize(4); + assertThat(readResult.getProperties()).containsKey("a"); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertexKeepNullFalse(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("a", "test"); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + doc.updateAttribute("a", null); + final VertexUpdateOptions options = new VertexUpdateOptions().keepNull(false); + final VertexUpdateEntity updateResult = vertices + .updateVertex(createResult.getKey(), doc, options); + assertThat(updateResult).isNotNull(); + assertThat(updateResult.getId()).isEqualTo(createResult.getId()); + assertThat(updateResult.getRev()).isNotEqualTo(updateResult.getOldRev()); + assertThat(updateResult.getOldRev()).isEqualTo(createResult.getRev()); + + final BaseDocument readResult = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(readResult.getKey()).isEqualTo(createResult.getKey()); + assertThat(readResult.getId()).isEqualTo(createResult.getId()); + assertThat(readResult.getRevision()).isNotNull(); + assertThat(readResult.getProperties().keySet()).doesNotContain("a"); + } + + @ParameterizedTest + @MethodSource("vertices") + void deleteVertex(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + vertices.deleteVertex(createResult.getKey(), null); + final BaseDocument vertex = vertices + .getVertex(createResult.getKey(), BaseDocument.class, 
null); + assertThat(vertex).isNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void deleteVertexIfMatch(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch(createResult.getRev()); + vertices.deleteVertex(createResult.getKey(), options); + final BaseDocument vertex = vertices + .getVertex(createResult.getKey(), BaseDocument.class, null); + assertThat(vertex).isNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void deleteVertexIfMatchFail(ArangoVertexCollection vertices) { + final BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + final VertexEntity createResult = vertices + .insertVertex(doc, null); + final VertexDeleteOptions options = new VertexDeleteOptions().ifMatch("no"); + Throwable thrown = catchThrowable(() -> vertices.deleteVertex(createResult.getKey(), options)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(412); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + @ParameterizedTest + @MethodSource("vertices") + void vertexKeyWithSpecialChars(ArangoVertexCollection vertices) { + final String key = "_-:.@()+,=;$!*'%" + UUID.randomUUID(); + final VertexEntity vertex = vertices + .insertVertex(new BaseDocument(key), null); + assertThat(vertex).isNotNull(); + ArangoCollection collection = vertices.graph().db().collection(vertices.name()); + final BaseDocument document = collection + .getDocument(vertex.getKey(), BaseDocument.class, null); + assertThat(document).isNotNull(); + assertThat(document.getKey()).isEqualTo(key); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java new file mode 100644 
index 000000000..362d07447 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java @@ -0,0 +1,128 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.ViewType; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoViewAsyncTest extends BaseJunit5 { + + @BeforeAll + static void init() { + initDB(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void create(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH).get(); + assertThat(db.view(name).exists().get()).isTrue(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createWithNotNormalizedName(ArangoDatabaseAsync db) { + assumeTrue(supportsExtendedNames()); + final String name = 
"view-\u006E\u0303\u00f1"; + Throwable thrown = catchThrowable(() -> db.createView(name, ViewType.ARANGO_SEARCH).get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized") + .extracting(it -> ((ArangoDBException) it).getResponseCode()).isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getInfo(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH).get(); + final ViewEntity info = db.view(name).getInfo().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(name); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getInfoSearchAlias(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String name = rndName(); + db.createView(name, ViewType.SEARCH_ALIAS).get(); + final ViewEntity info = db.view(name).getInfo().get(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(name); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getViews(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String name1 = rndName(); + String name2 = rndName(); + db.createView(name1, ViewType.ARANGO_SEARCH).get(); + db.createView(name2, ViewType.SEARCH_ALIAS).get(); + Collection views = db.getViews().get(); + assertThat(views).extracting(ViewEntity::getName).contains(name1, name2); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void drop(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH).get(); + final ArangoViewAsync 
view = db.view(name); + view.drop().get(); + assertThat(view.exists().get()).isFalse(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void rename(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + String oldName = rndName(); + String newName = rndName(); + + db.createView(oldName, ViewType.ARANGO_SEARCH).get(); + db.view(oldName).rename(newName).get(); + assertThat(db.view(oldName).exists().get()).isFalse(); + assertThat(db.view(newName).exists().get()).isTrue(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoViewTest.java b/test-functional/src/test/java/com/arangodb/ArangoViewTest.java new file mode 100644 index 000000000..6a2d67c2d --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ArangoViewTest.java @@ -0,0 +1,127 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ViewEntity; +import com.arangodb.entity.ViewType; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class ArangoViewTest extends BaseJunit5 { + + @BeforeAll + static void init() { + initDB(); + } + + @ParameterizedTest + @MethodSource("dbs") + void create(ArangoDatabase db) { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH); + assertThat(db.view(name).exists()).isTrue(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createWithNotNormalizedName(ArangoDatabase db) { + assumeTrue(supportsExtendedNames()); + final String name = "view-\u006E\u0303\u00f1"; + Throwable thrown = catchThrowable(() -> db.createView(name, ViewType.ARANGO_SEARCH)); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("normalized") + .extracting(it -> ((ArangoDBException) it).getResponseCode()).isEqualTo(400); + } + + @ParameterizedTest + @MethodSource("dbs") + void getInfo(ArangoDatabase db) { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH); + final ViewEntity info = db.view(name).getInfo(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(name); + assertThat(info.getType()).isEqualTo(ViewType.ARANGO_SEARCH); + } + + @ParameterizedTest + @MethodSource("dbs") + void getInfoSearchAlias(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String name = rndName(); + db.createView(name, ViewType.SEARCH_ALIAS); + final ViewEntity info = 
db.view(name).getInfo(); + assertThat(info).isNotNull(); + assertThat(info.getId()).isNotNull(); + assertThat(info.getName()).isEqualTo(name); + assertThat(info.getType()).isEqualTo(ViewType.SEARCH_ALIAS); + } + + @ParameterizedTest + @MethodSource("dbs") + void getViews(ArangoDatabase db) { + assumeTrue(isAtLeastVersion(3, 10)); + String name1 = rndName(); + String name2 = rndName(); + db.createView(name1, ViewType.ARANGO_SEARCH); + db.createView(name2, ViewType.SEARCH_ALIAS); + Collection views = db.getViews(); + assertThat(views).extracting(ViewEntity::getName).contains(name1, name2); + } + + @ParameterizedTest + @MethodSource("dbs") + void drop(ArangoDatabase db) { + String name = rndName(); + db.createView(name, ViewType.ARANGO_SEARCH); + final ArangoView view = db.view(name); + view.drop(); + assertThat(view.exists()).isFalse(); + } + + @ParameterizedTest + @MethodSource("dbs") + void rename(ArangoDatabase db) { + assumeTrue(isSingleServer()); + String oldName = rndName(); + String newName = rndName(); + + db.createView(oldName, ViewType.ARANGO_SEARCH); + db.view(oldName).rename(newName); + assertThat(db.view(oldName).exists()).isFalse(); + assertThat(db.view(newName).exists()).isTrue(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/BaseJunit5.java b/test-functional/src/test/java/com/arangodb/BaseJunit5.java new file mode 100644 index 000000000..d5c491361 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/BaseJunit5.java @@ -0,0 +1,225 @@ +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.*; +import com.arangodb.model.CollectionCreateOptions; +import com.arangodb.model.GraphCreateOptions; +import com.arangodb.util.TestUtils; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.api.Named; +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.arangodb.util.TestUtils.TEST_DB; + +public class BaseJunit5 { + protected static final ArangoConfigProperties config = ConfigUtils.loadConfig(); + private static final ArangoDB adb = new ArangoDB.Builder() + .loadProperties(config) + .protocol(Protocol.HTTP_JSON) + .build(); + + private static final ArangoDBVersion version = adb.getVersion(); + private static final ServerRole role = adb.getRole(); + + private static final List> adbs = Arrays.stream(Protocol.values()) + .filter(p -> !p.equals(Protocol.VST) || isLessThanVersion(3, 12)) + .map(p -> Named.of(p.toString(), new ArangoDB.Builder() + .loadProperties(config) + .protocol(p) + .build())) + .collect(Collectors.toList()); + + private static Boolean extendedDbNames; + private static Boolean extendedNames; + + protected static Stream> adbsStream() { + return adbs.stream(); + } + + protected static Stream> dbsStream() { + return adbsStream().map(mapNamedPayload(p -> p.db(TEST_DB))); + } + + protected static Stream> asyncAdbsStream() { + return adbs.stream().map(mapNamedPayload(ArangoDB::async)); + } + + protected static Stream> asyncDbsStream() { + return asyncAdbsStream().map(mapNamedPayload(p -> p.db(TEST_DB))); + } + + protected static Stream arangos() { + return adbsStream().map(Arguments::of); + } + + protected static Stream asyncArangos() { + return asyncAdbsStream().map(Arguments::of); + } + + protected static Stream dbs() { + return dbsStream().map(Arguments::of); + } + + protected static Stream asyncDbs() { + return asyncDbsStream().map(Arguments::of); + } + + protected static Function, Named> mapNamedPayload(Function mapper) { + return named -> Named.of(named.getName(), 
mapper.apply(named.getPayload())); + } + + protected static String getJwt() { + Response response = adb.execute(Request.builder() + .method(Request.Method.POST) + .db("_system") + .path("/_open/auth") + .body(JsonNodeFactory.instance.objectNode() + .put("username", config.getUser().orElse("root")) + .put("password", config.getPassword().orElse("")) + ) + .build(), ObjectNode.class); + + return response.getBody().get("jwt").textValue(); + } + + static ArangoDatabase initDB(String name) { + ArangoDatabase database = adb.db(name); + if (!database.exists()) + database.create(); + return database; + } + + protected static ArangoDatabase initDB() { + return initDB(TEST_DB); + } + + static void dropDB(String name) { + ArangoDatabase database = adb.db(name); + if (database.exists()) + database.drop(); + } + + static void initGraph(String name, Collection edgeDefinitions, GraphCreateOptions options) { + ArangoDatabase db = initDB(); + db.createGraph(name, edgeDefinitions, options); + } + + static void initCollections(String... collections) { + ArangoDatabase db = initDB(); + for (String collection : collections) { + if (db.collection(collection).exists()) + db.collection(collection).drop(); + db.createCollection(collection, null); + } + } + + static void initEdgeCollections(String... 
collections) { + ArangoDatabase db = initDB(); + for (String collection : collections) { + if (db.collection(collection).exists()) + db.collection(collection).drop(); + db.createCollection(collection, new CollectionCreateOptions().type(CollectionType.EDGES)); + } + } + + @BeforeAll + static void init() { + dropDB(TEST_DB); + } + + @AfterAll + static void shutdown() { + dropDB(TEST_DB); + } + + protected String getTestDb() { + return TEST_DB; + } + + public static String rnd() { + return UUID.randomUUID().toString(); + } + + public static synchronized boolean supportsExtendedDbNames() { + if (extendedDbNames == null) { + try { + ArangoDatabase testDb = adb + .db("test-" + TestUtils.generateRandomName(true, 20)); + testDb.create(); + extendedDbNames = true; + testDb.drop(); + } catch (ArangoDBException e) { + extendedDbNames = false; + } + } + return extendedDbNames; + } + + public static synchronized boolean supportsExtendedNames() { + if (extendedNames == null) { + try { + ArangoCollection testCol = adb.db() + .collection("test-" + TestUtils.generateRandomName(true, 20)); + testCol.create(); + extendedNames = true; + testCol.drop(); + } catch (ArangoDBException e) { + extendedNames = false; + } + } + return extendedNames; + } + + public static String rndDbName() { + return "testDB-" + TestUtils.generateRandomName(supportsExtendedDbNames(), 20); + } + + public static String rndName() { + return "dd-" + TestUtils.generateRandomName(supportsExtendedNames(), 20); + } + + public static boolean isAtLeastVersion(final int major, final int minor) { + return isAtLeastVersion(major, minor, 0); + } + + public static boolean isAtLeastVersion(final int major, final int minor, final int patch) { + return TestUtils.isAtLeastVersion(version.getVersion(), major, minor, patch); + } + + public static boolean isLessThanVersion(final int major, final int minor) { + return isLessThanVersion(major, minor, 0); + } + + public static boolean isLessThanVersion(final int major, final int 
minor, final int patch) { + return TestUtils.isLessThanVersion(version.getVersion(), major, minor, patch); + } + + public static boolean isStorageEngine(ArangoDBEngine.StorageEngineName name) { + return name.equals(adb.getEngine().getName()); + } + + public static boolean isSingleServer() { + return role == ServerRole.SINGLE; + } + + public static boolean isCluster() { + return role == ServerRole.COORDINATOR; + } + + public static boolean isEnterprise() { + return version.getLicense() == License.ENTERPRISE; + } + +} diff --git a/test-functional/src/test/java/com/arangodb/CompressionTest.java b/test-functional/src/test/java/com/arangodb/CompressionTest.java new file mode 100644 index 000000000..fa81c5bef --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/CompressionTest.java @@ -0,0 +1,59 @@ +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Locale; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Non-exhaustive tests of content encoding, executed during integration and native tests. 
+ * A complete test is at test-resilience/src/test/java/resilience/compression/CompressionTest.java + * + * @author Michele Rastelli + */ +class CompressionTest extends BaseJunit5 { + + @ParameterizedTest + @EnumSource(Protocol.class) + void gzip(Protocol protocol) { + doTest(protocol, Compression.GZIP); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void deflate(Protocol protocol) { + doTest(protocol, Compression.DEFLATE); + } + + void doTest(Protocol protocol, Compression compression) { + assumeTrue(isAtLeastVersion(3, 12)); + assumeTrue(protocol != Protocol.VST); + + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(protocol) + .compression(compression) + .compressionThreshold(0) + .compressionLevel(3) + .build(); + + Response resp = adb.execute(Request.builder() + .method(Request.Method.POST) + .path("/_admin/echo") + .body(UUID.randomUUID().toString()) + .build(), JsonNode.class); + + String encoding = compression.toString().toLowerCase(Locale.ROOT); + String reqAcceptEncoding = resp.getBody().get("headers").get("accept-encoding").textValue(); + assertThat(reqAcceptEncoding).contains(encoding); + + adb.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ConcurrencyAsyncTests.java b/test-functional/src/test/java/com/arangodb/ConcurrencyAsyncTests.java new file mode 100644 index 000000000..2b9c20e09 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ConcurrencyAsyncTests.java @@ -0,0 +1,103 @@ +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import 
java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class ConcurrencyAsyncTests { + + @SlowTest + @ParameterizedTest + @EnumSource(Protocol.class) + @Timeout(2) + void executorLimit(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ExecutorService asyncExecutor = Executors.newCachedThreadPool(); + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .maxConnections(1) + .protocol(protocol) + .build().async(); + + List> futures = IntStream.range(0, 20) + .mapToObj(i -> adb.getVersion() + .whenCompleteAsync((dbVersion, ex) -> { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + }, asyncExecutor)) + .collect(Collectors.toList()); + + futures.forEach(future -> { + try { + future.get(); + } catch (InterruptedException | ExecutionException e) { + e.printStackTrace(); + fail(); + } + }); + adb.shutdown(); + asyncExecutor.shutdown(); + } + + + @Disabled + @ParameterizedTest + @EnumSource(Protocol.class) + @Timeout(2) + void outgoingRequestsParallelismTest(Protocol protocol) throws ExecutionException, InterruptedException { + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .maxConnections(20) + .protocol(protocol).build().async(); + + List> reqs = new ArrayList<>(); + for (int i = 0; i < 50_000; i++) { + reqs.add(adb.getVersion()); + } + for (CompletableFuture req : reqs) { + req.get(); + } + adb.shutdown(); + } + + @SlowTest + @ParameterizedTest + @EnumSource(Protocol.class) + void concurrentPendingRequests(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || 
BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol).build().async(); + ExecutorService es = Executors.newFixedThreadPool(10); + List> futures = IntStream.range(0, 10) + .mapToObj(__ -> CompletableFuture.runAsync(() -> adb.db().query("RETURN SLEEP(1)", Void.class), es)) + .collect(Collectors.toList()); + for (CompletableFuture f : futures) { + f.get(); + } + adb.shutdown(); + es.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ConcurrencyTests.java b/test-functional/src/test/java/com/arangodb/ConcurrencyTests.java new file mode 100644 index 000000000..80aaa3ff9 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ConcurrencyTests.java @@ -0,0 +1,40 @@ +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class ConcurrencyTests { + + @SlowTest + @ParameterizedTest + @EnumSource(Protocol.class) + void concurrentPendingRequests(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ExecutorService es = Executors.newFixedThreadPool(10); + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol).build(); + List> futures = IntStream.range(0, 10) + .mapToObj(__ -> CompletableFuture.runAsync(() -> adb.db().query("RETURN SLEEP(1)", Void.class), es)) + .collect(Collectors.toList()); + for 
(CompletableFuture f : futures) { + f.get(); + } + adb.shutdown(); + es.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ConsumerThreadAsyncTest.java b/test-functional/src/test/java/com/arangodb/ConsumerThreadAsyncTest.java new file mode 100644 index 000000000..c7c219d06 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ConsumerThreadAsyncTest.java @@ -0,0 +1,34 @@ +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class ConsumerThreadAsyncTest extends BaseJunit5 { + + @ParameterizedTest + @EnumSource(Protocol.class) + void nestedRequests(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(protocol) + .maxConnections(1) + .build() + .async(); + + adb.getVersion() + .thenCompose(it -> adb.getVersion()) + .thenCompose(it -> adb.getVersion()) + .thenCompose(it -> adb.getVersion()) + .get(); + + adb.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/DocumentTest.java b/test-functional/src/test/java/com/arangodb/DocumentTest.java new file mode 100644 index 000000000..147346d0e --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/DocumentTest.java @@ -0,0 +1,138 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.DocumentCreateEntity; +import com.arangodb.util.RawJson; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Map; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + * @author Michele Rastelli + */ +class DocumentTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "DocumentTest_collection"; + + private static Stream cols() { + return dbsStream() + .map(mapNamedPayload(db -> db.collection(COLLECTION_NAME))) + .map(Arguments::of); + } + + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("cols") + void insertAsJson(ArangoCollection collection) { + //@formatter:off + final RawJson json = RawJson.of( + "{" + + "\"article\": {" + + "\"artist\": \"PREGARDIEN/RHEINISCHE KANTOREI/DAS\"," + + "\"releaseDate\": \"1970-01-01\"," + + "\"composer\": \"BACH\"," + + "\"format\": \"CD\"," + + "\"vat\": \"H\"," + + "\"carriers\": 1," + + "\"label\": \"CAPRICCIO\"," + + "\"title\": \"BACH ST MATTHEW PASSION BWV244\"," + + "\"barcode\": [" + + "\"4006408600466\"" + + "]," + + "\"conductor\": \"MAX, H.\"" + + "}," + + "\"stock\": {" + + "\"status\": 
\"RMV\"," + + "\"lastUpdate\": \"2016-11-01 00:00\"" + + "}" + + "}" + ); + //@formatter:on + final DocumentCreateEntity createResult = collection.insertDocument(json); + final BaseDocument doc = collection.getDocument(createResult.getKey(), BaseDocument.class); + assertThat(doc).isNotNull(); + final Object article = doc.getAttribute("article"); + assertThat(article).isNotNull(); + final Object artist = ((Map) article).get("artist"); + assertThat(artist).isNotNull(); + assertThat(artist.toString()).isEqualTo("PREGARDIEN/RHEINISCHE KANTOREI/DAS"); + } + + @ParameterizedTest + @MethodSource("cols") + void insertAsBaseDocument(ArangoCollection collection) { + final BaseDocument document = new BaseDocument(UUID.randomUUID().toString()); + { + final BaseDocument article = new BaseDocument(UUID.randomUUID().toString()); + document.addAttribute("article", article); + article.addAttribute("artist", "PREGARDIEN/RHEINISCHE KANTOREI/DAS"); + article.addAttribute("releaseDate", "1970-01-01"); + article.addAttribute("composer", "BACH"); + article.addAttribute("format", "CD"); + article.addAttribute("vat", "H"); + article.addAttribute("carriers", 1); + article.addAttribute("label", "CAPRICCIO"); + article.addAttribute("title", "BACH ST MATTHEW PASSION BWV244"); + article.addAttribute("barcode", new String[]{"4006408600466"}); + article.addAttribute("conductor", "MAX, H."); + final BaseDocument stock = new BaseDocument(UUID.randomUUID().toString()); + document.addAttribute("stock", stock); + stock.addAttribute("status", "RMV"); + stock.addAttribute("lastUpdate", "2016-11-01 00:00"); + } + final DocumentCreateEntity createResult = collection.insertDocument(document); + final BaseDocument doc = collection.getDocument(createResult.getKey(), BaseDocument.class); + assertThat(doc).isNotNull(); + final Object article = doc.getAttribute("article"); + assertThat(article).isNotNull(); + final Object artist = ((Map) article).get("artist"); + assertThat(artist).isNotNull(); + 
assertThat(artist.toString()).isEqualTo("PREGARDIEN/RHEINISCHE KANTOREI/DAS"); + } + + @ParameterizedTest + @MethodSource("cols") + void documentKeyWithSpecialChars(ArangoCollection collection) { + final String key = "_-:.@()+,=;$!*'%" + UUID.randomUUID(); + final BaseDocument document = new BaseDocument(key); + final DocumentCreateEntity createResult = collection.insertDocument(document); + final BaseDocument doc = collection.getDocument(createResult.getKey(), BaseDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getKey()).isEqualTo(key); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/InvertedIndexAsyncTest.java b/test-functional/src/test/java/com/arangodb/InvertedIndexAsyncTest.java new file mode 100644 index 000000000..fe9620264 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/InvertedIndexAsyncTest.java @@ -0,0 +1,208 @@ +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.*; +import com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzer; +import com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties; +import com.arangodb.model.InvertedIndexOptions; +import com.arangodb.model.PersistentIndexOptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class InvertedIndexAsyncTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "InvertedIndexTest_collection"; + + private static Stream asyncCols() { + return asyncDbsStream().map(mapNamedPayload(db -> db.collection(COLLECTION_NAME))).map(Arguments::of); + } + + @BeforeAll + static void init() { + 
initCollections(COLLECTION_NAME); + } + + private void createAnalyzer(String analyzerName, ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + DelimiterAnalyzer da = new DelimiterAnalyzer(); + da.setName(analyzerName); + da.setFeatures(features); + DelimiterAnalyzerProperties props = new DelimiterAnalyzerProperties(); + props.setDelimiter("-"); + da.setProperties(props); + + db.createSearchAnalyzer(da).get(); + } + + private InvertedIndexOptions createOptions(String analyzerName) { + Boolean cache = isEnterprise() ? true : null; + Boolean fieldCache = cache != null ? false : null; + InvertedIndexField field = new InvertedIndexField() + .name("foo") + .analyzer(AnalyzerType.identity.toString()) + .includeAllFields(true) + .searchField(false) + .trackListPositions(false) + .cache(fieldCache) + .features( + AnalyzerFeature.position, + AnalyzerFeature.frequency, + AnalyzerFeature.norm, + AnalyzerFeature.offset + ); + + if (isEnterprise()) { + field.nested( + new InvertedIndexField() + .name("bar") + .analyzer(analyzerName) + .searchField(true) + .features(AnalyzerFeature.position, AnalyzerFeature.frequency) + .nested( + new InvertedIndexField() + .name("baz") + .analyzer(AnalyzerType.identity.toString()) + .searchField(false) + .features(AnalyzerFeature.frequency) + ) + ); + } + + return new InvertedIndexOptions() + .name(rndName()) + .inBackground(true) + .parallelism(5) + .primarySort(new InvertedIndexPrimarySort() + .fields( + new InvertedIndexPrimarySort.Field("f1", InvertedIndexPrimarySort.Field.Direction.asc), + new InvertedIndexPrimarySort.Field("f2", InvertedIndexPrimarySort.Field.Direction.desc) + ) + .compression(ArangoSearchCompression.lz4) + .cache(cache) + ) + .storedValues(new StoredValue(Arrays.asList("f3", "f4"), ArangoSearchCompression.none, cache)) + 
.optimizeTopK("BM25(@doc) DESC", "TFIDF(@doc) DESC") + .analyzer(analyzerName) + .features(AnalyzerFeature.position, AnalyzerFeature.frequency) + .includeAllFields(false) + .trackListPositions(true) + .searchField(true) + .fields(field) + .consolidationIntervalMsec(11L) + .commitIntervalMsec(22L) + .cleanupIntervalStep(33L) + .consolidationPolicy(ConsolidationPolicy.of(ConsolidationType.TIER) + .segmentsMin(3L) + .segmentsMax(44L) + .segmentsBytesMax(55555L) + .segmentsBytesFloor(666L) + .minScore(77L) + ) + .writebufferIdle(44L) + .writebufferActive(55L) + .writebufferSizeMax(66L) + .cache(cache) + .primaryKeyCache(cache); + } + + private void assertCorrectIndexEntity(InvertedIndexEntity indexResult, InvertedIndexOptions options) { + assertThat(indexResult).isNotNull(); + assertThat(indexResult.getId()).isNotNull().isNotEmpty(); + // FIXME: in single server this is null + // assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getVersion()).isNotNull(); + assertThat(indexResult.getCode()).isNotNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.inverted); + assertThat(indexResult.getName()).isEqualTo(options.getName()); + assertThat(indexResult.getFields()).containsExactlyElementsOf(options.getFields()); + assertThat(indexResult.getSearchField()).isEqualTo(options.getSearchField()); + assertThat(indexResult.getStoredValues()).containsExactlyElementsOf(options.getStoredValues()); + assertThat(indexResult.getPrimarySort()).isEqualTo(options.getPrimarySort()); + assertThat(indexResult.getAnalyzer()).isEqualTo(options.getAnalyzer()); + assertThat(indexResult.getFeatures()).hasSameElementsAs(options.getFeatures()); + assertThat(indexResult.getIncludeAllFields()).isEqualTo(options.getIncludeAllFields()); + assertThat(indexResult.getTrackListPositions()).isEqualTo(options.getTrackListPositions()); + 
assertThat(indexResult.getCleanupIntervalStep()).isEqualTo(options.getCleanupIntervalStep()); + assertThat(indexResult.getCommitIntervalMsec()).isEqualTo(options.getCommitIntervalMsec()); + assertThat(indexResult.getConsolidationIntervalMsec()).isEqualTo(options.getConsolidationIntervalMsec()); + assertThat(indexResult.getConsolidationPolicy()).isEqualTo(options.getConsolidationPolicy()); + assertThat(indexResult.getWritebufferIdle()).isEqualTo(options.getWritebufferIdle()); + assertThat(indexResult.getWritebufferActive()).isEqualTo(options.getWritebufferActive()); + assertThat(indexResult.getWritebufferSizeMax()).isEqualTo(options.getWritebufferSizeMax()); + assertThat(indexResult.getCache()).isEqualTo(options.getCache()); + assertThat(indexResult.getPrimaryKeyCache()).isEqualTo(options.getPrimaryKeyCache()); + + if (isEnterprise() && isAtLeastVersion(3, 12)) { + assertThat(indexResult.getOptimizeTopK()).containsExactlyElementsOf(options.getOptimizeTopK()); + } + } + + @ParameterizedTest + @MethodSource("asyncCols") + void createAndGetInvertedIndex(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + String analyzerName = "delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options).get(); + assertCorrectIndexEntity(created, options); + InvertedIndexEntity loadedIndex = collection.getInvertedIndex(created.getName()).get(); + assertCorrectIndexEntity(loadedIndex, options); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getInvertedIndexesShouldNotReturnOtherIndexTypes(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + // create persistent index + collection.ensurePersistentIndex(Collections.singletonList("foo"), new 
PersistentIndexOptions().name("persistentIndex")); + + // create inverted index + String analyzerName = "delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options).get(); + + Collection loadedIndexes = collection.getInvertedIndexes().get(); + assertThat(loadedIndexes).map(InvertedIndexEntity::getName) + .doesNotContain("persistentIndex") + .contains(created.getName()); + } + + @ParameterizedTest + @MethodSource("asyncCols") + void getIndexesShouldNotReturnInvertedIndexes(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { + assumeTrue(isAtLeastVersion(3, 10)); + + // create persistent index + collection.ensurePersistentIndex(Collections.singletonList("foo"), new PersistentIndexOptions().name("persistentIndex")); + + // create inverted index + String analyzerName = "delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options).get(); + + Collection loadedIndexes = collection.getIndexes().get(); + assertThat(loadedIndexes).map(IndexEntity::getName) + .doesNotContain(created.getName()) + .contains("persistentIndex"); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/InvertedIndexTest.java b/test-functional/src/test/java/com/arangodb/InvertedIndexTest.java new file mode 100644 index 000000000..7476b4c4f --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/InvertedIndexTest.java @@ -0,0 +1,207 @@ +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.entity.arangosearch.*; +import com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzer; +import com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties; +import com.arangodb.model.InvertedIndexOptions; +import 
com.arangodb.model.PersistentIndexOptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class InvertedIndexTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "InvertedIndexTest_collection"; + + private static Stream cols() { + return dbsStream().map(mapNamedPayload(db -> db.collection(COLLECTION_NAME))).map(Arguments::of); + } + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + private void createAnalyzer(String analyzerName, ArangoDatabase db) { + Set features = new HashSet<>(); + features.add(AnalyzerFeature.frequency); + features.add(AnalyzerFeature.norm); + features.add(AnalyzerFeature.position); + + DelimiterAnalyzer da = new DelimiterAnalyzer(); + da.setName(analyzerName); + da.setFeatures(features); + DelimiterAnalyzerProperties props = new DelimiterAnalyzerProperties(); + props.setDelimiter("-"); + da.setProperties(props); + + db.createSearchAnalyzer(da); + } + + private InvertedIndexOptions createOptions(String analyzerName) { + Boolean cache = isEnterprise() ? true : null; + Boolean fieldCache = cache != null ? 
false : null; + InvertedIndexField field = new InvertedIndexField() + .name("foo") + .analyzer(AnalyzerType.identity.toString()) + .includeAllFields(true) + .searchField(false) + .trackListPositions(false) + .cache(fieldCache) + .features( + AnalyzerFeature.position, + AnalyzerFeature.frequency, + AnalyzerFeature.norm, + AnalyzerFeature.offset + ); + + if (isEnterprise()) { + field.nested( + new InvertedIndexField() + .name("bar") + .analyzer(analyzerName) + .searchField(true) + .features(AnalyzerFeature.position, AnalyzerFeature.frequency) + .nested( + new InvertedIndexField() + .name("baz") + .analyzer(AnalyzerType.identity.toString()) + .searchField(false) + .features(AnalyzerFeature.frequency) + ) + ); + } + + return new InvertedIndexOptions() + .name(rndName()) + .inBackground(true) + .parallelism(5) + .primarySort(new InvertedIndexPrimarySort() + .fields( + new InvertedIndexPrimarySort.Field("f1", InvertedIndexPrimarySort.Field.Direction.asc), + new InvertedIndexPrimarySort.Field("f2", InvertedIndexPrimarySort.Field.Direction.desc) + ) + .compression(ArangoSearchCompression.lz4) + .cache(cache) + ) + .storedValues(new StoredValue(Arrays.asList("f3", "f4"), ArangoSearchCompression.none, cache)) + .optimizeTopK("BM25(@doc) DESC", "TFIDF(@doc) DESC") + .analyzer(analyzerName) + .features(AnalyzerFeature.position, AnalyzerFeature.frequency) + .includeAllFields(false) + .trackListPositions(true) + .searchField(true) + .fields(field) + .consolidationIntervalMsec(11L) + .commitIntervalMsec(22L) + .cleanupIntervalStep(33L) + .consolidationPolicy(ConsolidationPolicy.of(ConsolidationType.TIER) + .segmentsMin(3L) + .segmentsMax(44L) + .segmentsBytesMax(55555L) + .segmentsBytesFloor(666L) + .minScore(77L) + ) + .writebufferIdle(44L) + .writebufferActive(55L) + .writebufferSizeMax(66L) + .cache(cache) + .primaryKeyCache(cache); + } + + private void assertCorrectIndexEntity(InvertedIndexEntity indexResult, InvertedIndexOptions options) { + 
assertThat(indexResult).isNotNull(); + assertThat(indexResult.getId()).isNotNull().isNotEmpty(); + // FIXME: in single server this is null + // assertThat(indexResult.getIsNewlyCreated()).isTrue(); + assertThat(indexResult.getUnique()).isFalse(); + assertThat(indexResult.getSparse()).isTrue(); + assertThat(indexResult.getVersion()).isNotNull(); + assertThat(indexResult.getCode()).isNotNull(); + assertThat(indexResult.getType()).isEqualTo(IndexType.inverted); + assertThat(indexResult.getName()).isEqualTo(options.getName()); + assertThat(indexResult.getFields()).containsExactlyElementsOf(options.getFields()); + assertThat(indexResult.getSearchField()).isEqualTo(options.getSearchField()); + assertThat(indexResult.getStoredValues()).containsExactlyElementsOf(options.getStoredValues()); + assertThat(indexResult.getPrimarySort()).isEqualTo(options.getPrimarySort()); + assertThat(indexResult.getAnalyzer()).isEqualTo(options.getAnalyzer()); + assertThat(indexResult.getFeatures()).hasSameElementsAs(options.getFeatures()); + assertThat(indexResult.getIncludeAllFields()).isEqualTo(options.getIncludeAllFields()); + assertThat(indexResult.getTrackListPositions()).isEqualTo(options.getTrackListPositions()); + assertThat(indexResult.getCleanupIntervalStep()).isEqualTo(options.getCleanupIntervalStep()); + assertThat(indexResult.getCommitIntervalMsec()).isEqualTo(options.getCommitIntervalMsec()); + assertThat(indexResult.getConsolidationIntervalMsec()).isEqualTo(options.getConsolidationIntervalMsec()); + assertThat(indexResult.getConsolidationPolicy()).isEqualTo(options.getConsolidationPolicy()); + assertThat(indexResult.getWritebufferIdle()).isEqualTo(options.getWritebufferIdle()); + assertThat(indexResult.getWritebufferActive()).isEqualTo(options.getWritebufferActive()); + assertThat(indexResult.getWritebufferSizeMax()).isEqualTo(options.getWritebufferSizeMax()); + assertThat(indexResult.getCache()).isEqualTo(options.getCache()); + 
assertThat(indexResult.getPrimaryKeyCache()).isEqualTo(options.getPrimaryKeyCache()); + + if (isEnterprise() && isAtLeastVersion(3, 12)) { + assertThat(indexResult.getOptimizeTopK()).containsExactlyElementsOf(options.getOptimizeTopK()); + } + } + + @ParameterizedTest + @MethodSource("cols") + void createAndGetInvertedIndex(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + String analyzerName = "delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options); + assertCorrectIndexEntity(created, options); + InvertedIndexEntity loadedIndex = collection.getInvertedIndex(created.getName()); + assertCorrectIndexEntity(loadedIndex, options); + } + + @ParameterizedTest + @MethodSource("cols") + void getInvertedIndexesShouldNotReturnOtherIndexTypes(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + + // create persistent index + collection.ensurePersistentIndex(Collections.singletonList("foo"), new PersistentIndexOptions().name("persistentIndex")); + + // create inverted index + String analyzerName = "delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options); + + Collection loadedIndexes = collection.getInvertedIndexes(); + assertThat(loadedIndexes).map(InvertedIndexEntity::getName) + .doesNotContain("persistentIndex") + .contains(created.getName()); + } + + @ParameterizedTest + @MethodSource("cols") + void getIndexesShouldNotReturnInvertedIndexes(ArangoCollection collection) { + assumeTrue(isAtLeastVersion(3, 10)); + + // create persistent index + collection.ensurePersistentIndex(Collections.singletonList("foo"), new PersistentIndexOptions().name("persistentIndex")); + + // create inverted index + String analyzerName = 
"delimiter-" + UUID.randomUUID(); + createAnalyzer(analyzerName, collection.db()); + InvertedIndexOptions options = createOptions(analyzerName); + InvertedIndexEntity created = collection.ensureInvertedIndex(options); + + Collection loadedIndexes = collection.getIndexes(); + assertThat(loadedIndexes).map(IndexEntity::getName) + .doesNotContain(created.getName()) + .contains("persistentIndex"); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java b/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java new file mode 100644 index 000000000..ea82b52d1 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java @@ -0,0 +1,144 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.DocumentCreateEntity; +import com.arangodb.entity.StreamTransactionEntity; +import com.arangodb.model.DocumentReadOptions; +import com.arangodb.model.StreamTransactionOptions; +import com.arangodb.serde.jackson.JacksonSerde; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.module.SimpleModule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +import static com.arangodb.util.TestUtils.TEST_DB; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * NB: excluded from shaded tests + */ +class JacksonRequestContextTest { + + private static final String COLLECTION_NAME = "JacksonRequestContextTest_collection"; + + private static ArangoDB arangoDB; + private static ArangoDatabase db; + private static ArangoCollection collection; + private static ArangoCollectionAsync collectionAsync; + + @BeforeAll + static void init() { + JacksonSerde serde = JacksonSerde.of(ContentType.JSON) + .configure((mapper) -> { + SimpleModule module = new SimpleModule("PersonModule"); + module.addDeserializer(Person.class, new PersonDeserializer()); + mapper.registerModule(module); + }); + arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .serde(serde).build(); + + db = arangoDB.db(TEST_DB); + if (!db.exists()) { + db.create(); + } + + collection = db.collection(COLLECTION_NAME); + collectionAsync = arangoDB.async().db(TEST_DB).collection(COLLECTION_NAME); + if 
(!collection.exists()) { + collection.create(); + } + } + + @AfterAll + static void shutdown() { + if (db.exists()) { + db.drop(); + } + arangoDB.shutdown(); + } + + static class PersonDeserializer extends JsonDeserializer { + @Override + public Person deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { + JsonNode rootNode = parser.getCodec().readTree(parser); + Person person = new Person(rootNode.get("name").asText()); + person.txId = JacksonSerde.getRequestContext(ctx).getStreamTransactionId().get(); + return person; + } + } + + static class Person { + String name; + String txId; + + Person(String name) { + this.name = name; + } + } + + @Test + void getDocumentWithinTx() { + DocumentCreateEntity doc = collection.insertDocument( + new BaseDocument(Collections.singletonMap("name", "foo")), null); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + Person read = collection.getDocument(doc.getKey(), Person.class, + new DocumentReadOptions().streamTransactionId(tx.getId())); + + assertThat(read.name).isEqualTo("foo"); + assertThat(read.txId).isEqualTo(tx.getId()); + + db.abortStreamTransaction(tx.getId()); + } + + @Test + void asyncGetDocumentWithinTx() throws ExecutionException, InterruptedException { + DocumentCreateEntity doc = collection.insertDocument( + new BaseDocument(Collections.singletonMap("name", "foo")), null); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + Person read = collectionAsync.getDocument(doc.getKey(), Person.class, + new DocumentReadOptions().streamTransactionId(tx.getId())) + .get(); + + assertThat(read.name).isEqualTo("foo"); + assertThat(read.txId).isEqualTo(tx.getId()); + + db.abortStreamTransaction(tx.getId()); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java 
b/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java new file mode 100644 index 000000000..f4c63d7d0 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java @@ -0,0 +1,107 @@ +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.ConfigUtils; +import com.arangodb.internal.ArangoRequestParam; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class JwtAuthAsyncTest { + + private volatile static String jwt; + + @BeforeAll + static void init() { + ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .build(); + jwt = getJwt(arangoDB); + arangoDB.shutdown(); + } + + private static String getJwt(ArangoDB arangoDB) { + ArangoConfigProperties conf = ConfigUtils.loadConfig(); + Map reqBody = new HashMap<>(); + reqBody.put("username", conf.getUser().orElse("root")); + reqBody.put("password", conf.getPassword().orElse(null)); + + Request req = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.POST) + .path("/_open/auth") + .body(reqBody) + .build(); + + Response resp = arangoDB.execute(req, Map.class); + return (String) resp.getBody().get("jwt"); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void notAuthenticated(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDBAsync arangoDB = getBuilder(protocol).acquireHostList(false).build().async(); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + 
assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(401); + arangoDB.shutdown(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticated(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDBAsync arangoDB = getBuilder(protocol) + .jwt(jwt) + .build() + .async(); + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void updateJwt(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(protocol != Protocol.VST, "DE-423"); + ArangoDBAsync arangoDB = getBuilder(protocol) + .jwt(jwt) + .build() + .async(); + arangoDB.getVersion().get(); + arangoDB.updateJwt("bla"); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(401); + + arangoDB.updateJwt(jwt); + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + private ArangoDB.Builder getBuilder(Protocol protocol) { + return new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol) + .jwt(null) // unset credentials from properties file + .user(null) // unset credentials from properties file + .password(null); // unset credentials from properties file + } +} diff --git a/test-functional/src/test/java/com/arangodb/JwtAuthTest.java b/test-functional/src/test/java/com/arangodb/JwtAuthTest.java new file mode 100644 index 000000000..b743db4a1 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/JwtAuthTest.java @@ -0,0 +1,104 @@ +package com.arangodb; + +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.ConfigUtils; +import 
com.arangodb.internal.ArangoRequestParam; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.HashMap; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class JwtAuthTest { + + private volatile static String jwt; + + @BeforeAll + static void init() { + ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .build(); + jwt = getJwt(arangoDB); + arangoDB.shutdown(); + } + + private static String getJwt(ArangoDB arangoDB) { + ArangoConfigProperties conf = ConfigUtils.loadConfig(); + Map reqBody = new HashMap<>(); + reqBody.put("username", conf.getUser().orElse("root")); + reqBody.put("password", conf.getPassword().orElse(null)); + + Request req = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.POST) + .path("/_open/auth") + .body(reqBody) + .build(); + + Response resp = arangoDB.execute(req, Map.class); + return (String) resp.getBody().get("jwt"); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void notAuthenticated(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDB arangoDB = getBuilder(protocol).acquireHostList(false).build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(401); + arangoDB.shutdown(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void authenticated(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDB arangoDB = getBuilder(protocol) + .jwt(jwt) + .build(); + 
arangoDB.getVersion(); + arangoDB.shutdown(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void updateJwt(Protocol protocol) { + assumeTrue(protocol != Protocol.VST, "DE-423"); + ArangoDB arangoDB = getBuilder(protocol) + .jwt(jwt) + .build(); + arangoDB.getVersion(); + arangoDB.updateJwt("bla"); + + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(401); + + arangoDB.updateJwt(jwt); + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + private ArangoDB.Builder getBuilder(Protocol protocol) { + return new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol) + .jwt(null) // unset credentials from properties file + .user(null) // unset credentials from properties file + .password(null); // unset credentials from properties file + } +} diff --git a/test-functional/src/test/java/com/arangodb/JwtTest.java b/test-functional/src/test/java/com/arangodb/JwtTest.java new file mode 100644 index 000000000..23b18a3d3 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/JwtTest.java @@ -0,0 +1,31 @@ +package com.arangodb; + +import com.arangodb.entity.ArangoDBVersion; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class JwtTest extends BaseJunit5 { + + private final String jwt = getJwt(); + + @ParameterizedTest + @EnumSource(Protocol.class) + void getVersion(Protocol p) { + assumeTrue(!p.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + ArangoDB.Builder builder = new ArangoDB.Builder() + .protocol(p) + .jwt(jwt); + config.getHosts().ifPresent(it -> + it.forEach(h -> + builder.host(h.getHost(), h.getPort()))); + ArangoDB adb = builder.build(); + + 
ArangoDBVersion version = adb.getVersion(); + assertThat(version).isNotNull(); + adb.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ParallelAsyncTest.java b/test-functional/src/test/java/com/arangodb/ParallelAsyncTest.java new file mode 100644 index 000000000..5a697f7f4 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ParallelAsyncTest.java @@ -0,0 +1,43 @@ +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Future; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class ParallelAsyncTest { + + @SlowTest + @ParameterizedTest + @EnumSource(Protocol.class) + void connectionParallelism(Protocol protocol) throws InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + // test that connections are internally async and can have multiple pending requests + // BTS-1102: the server does not run pipelined HTTP/1.1 requests in parallel + assumeTrue(protocol != Protocol.HTTP_JSON && protocol != Protocol.HTTP_VPACK); + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol) + .maxConnections(1) + .build() + .async(); + + List> tasks = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tasks.add(adb.db().query("return sleep(1)", Void.class)); + } + + Thread.sleep(2_000); + assertThat(tasks).allMatch(Future::isDone); + adb.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ParallelTest.java b/test-functional/src/test/java/com/arangodb/ParallelTest.java new file mode 100644 index 000000000..00bf1eaba --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/ParallelTest.java @@ -0,0 +1,46 
@@ +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class ParallelTest { + + @SlowTest + @ParameterizedTest + @EnumSource(Protocol.class) + void connectionParallelism(Protocol protocol) throws InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + // test that connections are internally async and can have multiple pending requests + // BTS-1102: the server does not run pipelined HTTP/1.1 requests in parallel + assumeTrue(protocol != Protocol.HTTP_JSON && protocol != Protocol.HTTP_VPACK); + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(protocol) + .maxConnections(1) + .build(); + + ExecutorService es = Executors.newFixedThreadPool(3); + List> tasks = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tasks.add(es.submit(() -> adb.db().query("return sleep(1)", Void.class))); + } + + Thread.sleep(2_000); + assertThat(tasks).allMatch(Future::isDone); + adb.shutdown(); + es.shutdown(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/RequestContextTest.java b/test-functional/src/test/java/com/arangodb/RequestContextTest.java new file mode 100644 index 000000000..f76a0fd3d --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/RequestContextTest.java @@ -0,0 +1,159 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.DocumentCreateEntity; +import com.arangodb.entity.StreamTransactionEntity; +import com.arangodb.model.DocumentReadOptions; +import com.arangodb.model.StreamTransactionOptions; +import com.arangodb.serde.ArangoSerde; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +import static com.arangodb.util.TestUtils.TEST_DB; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * NB: excluded from shaded tests + */ +class RequestContextTest { + + private static final String COLLECTION_NAME = "RequestContextTest_collection"; + + private static ArangoDB arangoDB; + private static ArangoDatabase db; + private static ArangoCollection collection; + private static ArangoCollectionAsync collectionAsync; + + @BeforeAll + static void init() { + ArangoSerde serde = new ArangoSerde() { + private ObjectMapper mapper = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + @Override + public byte[] serialize(Object value) { + throw new 
UnsupportedOperationException(); + } + + @Override + public T deserialize(byte[] content, Class clazz) { + throw new UnsupportedOperationException(); + } + + @Override + public T deserialize(byte[] content, Class clazz, RequestContext ctx) { + Objects.requireNonNull(ctx); + + if (clazz != Person.class) { + throw new UnsupportedOperationException(); + } + + try { + Person res = mapper.readValue(content, Person.class); + res.txId = ctx.getStreamTransactionId().get(); + return (T) res; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; + + arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .serde(serde).build(); + + db = arangoDB.db(TEST_DB); + if (!db.exists()) { + db.create(); + } + + collection = db.collection(COLLECTION_NAME); + collectionAsync = arangoDB.async().db(TEST_DB).collection(COLLECTION_NAME); + if (!collection.exists()) { + collection.create(); + } + } + + @AfterAll + static void shutdown() { + if (db.exists()) { + db.drop(); + } + arangoDB.shutdown(); + } + + static class Person { + String name; + String txId; + + Person(@JsonProperty("name") String name) { + this.name = name; + } + } + + @Test + void getDocumentWithinTx() { + DocumentCreateEntity doc = collection.insertDocument( + new BaseDocument(Collections.singletonMap("name", "foo")), null); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + Person read = collection.getDocument(doc.getKey(), Person.class, + new DocumentReadOptions().streamTransactionId(tx.getId())); + + assertThat(read.name).isEqualTo("foo"); + assertThat(read.txId).isEqualTo(tx.getId()); + + db.abortStreamTransaction(tx.getId()); + } + + @Test + void asyncGetDocumentWithinTx() throws ExecutionException, InterruptedException { + DocumentCreateEntity doc = collection.insertDocument( + new BaseDocument(Collections.singletonMap("name", "foo")), null); + + StreamTransactionEntity tx = db + 
.beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + Person read = collectionAsync.getDocument(doc.getKey(), Person.class, + new DocumentReadOptions().streamTransactionId(tx.getId())) + .get(); + + assertThat(read.name).isEqualTo("foo"); + assertThat(read.txId).isEqualTo(tx.getId()); + + db.abortStreamTransaction(tx.getId()); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/SerializableTest.java b/test-functional/src/test/java/com/arangodb/SerializableTest.java new file mode 100644 index 000000000..a915a74aa --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/SerializableTest.java @@ -0,0 +1,91 @@ +package com.arangodb; + +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.BaseEdgeDocument; +import com.arangodb.entity.ErrorEntity; +import com.arangodb.internal.net.ArangoDBRedirectException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import org.junit.jupiter.api.Test; + +import java.io.*; +import java.util.Collections; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +class SerializableTest { + + @Test + void serializeArangoDBException() throws IOException, ClassNotFoundException { + ObjectMapper mapper = new ObjectMapper(); + JsonNode jn = JsonNodeFactory.instance.objectNode() + .put("errorMessage", "boomError") + .put("exception", "boomException") + .put("code", 11) + .put("errorNum", 22); + ErrorEntity ee = mapper.readerFor(ErrorEntity.class).readValue(jn); + ArangoDBException e = new ArangoDBException(ee); + + ArangoDBException e2 = roundTrip(e); + assertThat(e2.getException()).isEqualTo(e.getException()); + assertThat(e2.getResponseCode()).isEqualTo(e.getResponseCode()); + assertThat(e2.getErrorNum()).isEqualTo(e.getErrorNum()); + assertThat(e2.getRequestId()).isEqualTo(e.getRequestId()); + } + + @Test + void 
serializeArangoDBRedirectException() throws IOException, ClassNotFoundException { + ArangoDBRedirectException e = new ArangoDBRedirectException("foo", "bar"); + ArangoDBRedirectException e2 = roundTrip(e); + assertThat(e2.getMessage()).isEqualTo(e.getMessage()); + assertThat(e2.getLocation()).isEqualTo(e.getLocation()); + } + + @Test + void serializeArangoDBMultipleException() throws IOException, ClassNotFoundException { + List exceptions = Collections.singletonList(new RuntimeException("foo")); + ArangoDBMultipleException e = new ArangoDBMultipleException(exceptions); + ArangoDBMultipleException e2 = roundTrip(e); + assertThat(e2.getExceptions()).hasSize(1); + assertThat(e2.getExceptions().iterator().next().getMessage()).isEqualTo("foo"); + } + + @Test + void serializeBaseDocument() throws IOException, ClassNotFoundException { + BaseDocument doc = new BaseDocument(); + doc.setKey("test"); + doc.setId("id"); + doc.setRevision("revision"); + doc.addAttribute("foo", "bar"); + BaseDocument doc2 = roundTrip(doc); + assertThat(doc2).isEqualTo(doc); + } + + @Test + void serializeBaseEdgeDocument() throws IOException, ClassNotFoundException { + BaseEdgeDocument doc = new BaseEdgeDocument(); + doc.setKey("test"); + doc.setId("id"); + doc.setRevision("revision"); + doc.setFrom("from"); + doc.setTo("to"); + doc.addAttribute("foo", "bar"); + BaseDocument doc2 = roundTrip(doc); + assertThat(doc2).isEqualTo(doc); + } + + private T roundTrip(T input) throws IOException, ClassNotFoundException { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + ObjectOutputStream objectOutputStream = new ObjectOutputStream(os); + objectOutputStream.writeObject(input); + + InputStream is = new ByteArrayInputStream(os.toByteArray()); + ObjectInputStream objectInputStream = new ObjectInputStream(is); + T output = (T) objectInputStream.readObject(); + objectInputStream.close(); + + return output; + } +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java 
b/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java new file mode 100644 index 000000000..2ad090146 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java @@ -0,0 +1,817 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class StreamTransactionAsyncTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "StreamTransactionTest_collection"; + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void beginStreamTransaction(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction(null).get(); + assertThat(tx.getId()).isNotNull(); + assertThat(tx.getStatus()).isEqualTo(StreamTransactionStatus.running); + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void beginStreamTransactionWithNonExistingCollectionsShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> + db.beginStreamTransaction(new StreamTransactionOptions().writeCollections("notExistingCollection")).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void abortStreamTransaction(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null).get(); + StreamTransactionEntity abortedTx = db.abortStreamTransaction(begunTx.getId()).get(); + + assertThat(abortedTx.getId()).isNotNull(); + assertThat(abortedTx.getId()).isEqualTo(begunTx.getId()); + assertThat(abortedTx.getStatus()).isEqualTo(StreamTransactionStatus.aborted); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void abortStreamTransactionTwice(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null).get(); + db.abortStreamTransaction(begunTx.getId()); + db.abortStreamTransaction(begunTx.getId()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") 
+ void abortStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction("000000").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void abortStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction("invalidTransactionId").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void abortCommittedStreamTransactionShouldThrow(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null).get(); + db.commitStreamTransaction(createdTx.getId()).get(); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction(createdTx.getId()).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getStreamTransaction(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null).get(); + StreamTransactionEntity gotTx = db.getStreamTransaction(createdTx.getId()).get(); + + assertThat(gotTx.getId()).isNotNull(); + 
assertThat(gotTx.getId()).isEqualTo(createdTx.getId()); + assertThat(gotTx.getStatus()).isEqualTo(StreamTransactionStatus.running); + + db.abortStreamTransaction(createdTx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.getStreamTransaction("000000").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.getStreamTransaction("invalidTransactionId").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void commitStreamTransaction(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null).get(); + StreamTransactionEntity committedTx = db.commitStreamTransaction(createdTx.getId()).get(); + + assertThat(committedTx.getId()).isNotNull(); + assertThat(committedTx.getId()).isEqualTo(createdTx.getId()); + assertThat(committedTx.getStatus()).isEqualTo(StreamTransactionStatus.committed); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void commitStreamTransactionTwice(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 
5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null).get(); + db.commitStreamTransaction(createdTx.getId()); + db.commitStreamTransaction(createdTx.getId()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void commitStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.commitStreamTransaction("000000").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void commitStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.commitStreamTransaction("invalidTransactionId").get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void commitAbortedStreamTransactionShouldThrow(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null).get(); + db.abortStreamTransaction(createdTx.getId()).get(); + Throwable thrown = catchThrowable(() -> db.commitStreamTransaction(createdTx.getId()).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getDocument(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + 
assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null).get(); + + // assert that the document is not found from within the tx + assertThat(collection.getDocument(externalDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getDocumentWithNonExistingTransactionIdShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + Throwable thrown = catchThrowable(() -> collection + .getDocument("docId", BaseDocument.class, new DocumentReadOptions().streamTransactionId("123456")).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getDocumentWithInvalidTransactionIdShouldThrow(ArangoDatabaseAsync db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + Throwable thrown = catchThrowable(() -> collection + .getDocument("docId", BaseDocument.class, new DocumentReadOptions().streamTransactionId("abcde")).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getDocuments(ArangoDatabaseAsync db) throws 
ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert documents from outside the tx + DocumentCreateEntity externalDoc1 = collection + .insertDocument(new BaseDocument(), null).get(); + + DocumentCreateEntity externalDoc2 = collection + .insertDocument(new BaseDocument(), null).get(); + + // assert that the documents are not found from within the tx + MultiDocumentEntity documents = collection + .getDocuments(Arrays.asList(externalDoc1.getId(), externalDoc2.getId()), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get(); + + assertThat(documents.getDocuments()).isEmpty(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void insertDocument(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert a document from within the tx + DocumentCreateEntity txDoc = collection + .insertDocument(new BaseDocument(), new DocumentCreateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the document is not found from outside the tx + assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, null).get()).isNull(); + + // assert that the document is found from within the tx + 
assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get()).isNotNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document is found after commit + assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, null).get()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void insertDocuments(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert documents from within the tx + MultiDocumentEntity> txDocs = collection + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().streamTransactionId(tx.getId()), BaseDocument.class).get(); + + List keys = txDocs.getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + // assert that the documents are not found from outside the tx + assertThat(collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments()).isEmpty(); + + // assert that the documents are found from within the tx + assertThat(collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())).get() + .getDocuments()).hasSize(keys.size()); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document is found after commit + assertThat(collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments()).hasSize(keys.size()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void replaceDocument(ArangoDatabaseAsync db) throws 
ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection.insertDocument(doc, null).get(); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + // replace document from within the tx + doc.updateAttribute("test", "bar"); + collection.replaceDocument(createdDoc.getKey(), doc, + new DocumentReplaceOptions().streamTransactionId(tx.getId())).get(); + + // assert that the document has not been replaced from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the document has been replaced from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test", + "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been replaced after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void replaceDocuments(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + 
ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + List createdDocs = collection + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true), BaseDocument.class).get().getDocuments().stream() + .map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + List modifiedDocs = createdDocs.stream().peek(doc -> doc.updateAttribute("test", "bar")).collect(Collectors.toList()); + + // replace document from within the tx + collection + .replaceDocuments(modifiedDocs, new DocumentReplaceOptions().streamTransactionId(tx.getId())).get(); + + // assert that the documents has not been replaced from outside the tx + collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("foo")); + + // assert that the document has been replaced from within the tx + collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())).get() + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been replaced after commit + collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void updateDocument(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); 
+ + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection.insertDocument(doc, null).get(); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + // update document from within the tx + doc.updateAttribute("test", "bar"); + collection + .updateDocument(createdDoc.getKey(), doc, new DocumentUpdateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the document has not been updated from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the document has been updated from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test", "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been updated after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void updateDocuments(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + List createdDocs = collection + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true), 
BaseDocument.class).get().getDocuments().stream() + .map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + List modifiedDocs = createdDocs.stream().peek(doc -> doc.updateAttribute("test", "bar")).collect(Collectors.toList()); + + // update documents from within the tx + collection + .updateDocuments(modifiedDocs, new DocumentUpdateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the documents have not been updated from outside the tx + collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("foo")); + + // assert that the documents have been updated from within the tx + collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())).get() + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been updated after commit + collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteDocument(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection + .insertDocument(new BaseDocument(), null).get(); + 
+ StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + // delete document from within the tx + collection + .deleteDocument(createdDoc.getKey(), new DocumentDeleteOptions().streamTransactionId(tx.getId())).get(); + + // assert that the document has not been deleted from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get()).isNotNull(); + + // assert that the document has been deleted from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been deleted after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null).get()).isNull(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void deleteDocuments(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + List keys = collection + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument())).get() + .getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + // delete document from within the tx + collection + .deleteDocuments(keys, new DocumentDeleteOptions().streamTransactionId(tx.getId())).get(); + + // assert that the documents has not been deleted from outside the tx + assertThat(collection.getDocuments(keys, BaseDocument.class, 
null).get().getDocuments()).hasSize(keys.size()); + + // assert that the document has been deleted from within the tx + assertThat(collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())).get() + .getDocuments()).isEmpty(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the document has been deleted after commit + assertThat(collection.getDocuments(keys, BaseDocument.class, null).get().getDocuments()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void documentExists(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null).get(); + + // assert that the document is not found from within the tx + assertThat(collection + .documentExists(externalDoc.getKey(), new DocumentExistsOptions().streamTransactionId(tx.getId())).get()).isFalse(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void count(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + Long initialCount = collection.count().get().getCount(); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)).get(); + + // insert a document from outside the tx + 
collection.insertDocument(new BaseDocument(), null).get(); + + // assert that the document is not counted from within the tx + assertThat(collection.count(new CollectionCountOptions().streamTransactionId(tx.getId())).get() + .getCount()).isEqualTo(initialCount); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void truncate(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + collection.insertDocument(new BaseDocument(), null).get(); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + // truncate document from within the tx + collection.truncate(new CollectionTruncateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the collection has not been truncated from outside the tx + assertThat(collection.count().get().getCount()).isPositive(); + + // assert that the collection has been truncated from within the tx + assertThat(collection.count(new CollectionCountOptions().streamTransactionId(tx.getId())).get() + .getCount()).isZero(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the collection has been truncated after commit + assertThat(collection.count().get().getCount()).isZero(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void createCursor(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync 
collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null).get(); + + final Map bindVars = new HashMap<>(); + bindVars.put("@collection", COLLECTION_NAME); + bindVars.put("key", externalDoc.getKey()); + + ArangoCursorAsync cursor = db + .query("FOR doc IN @@collection FILTER doc._key == @key RETURN doc", BaseDocument.class, bindVars, + new AqlQueryOptions().streamTransactionId(tx.getId())).get(); + + // assert that the document is not found from within the tx + assertThat(cursor.getResult()).isEmpty(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void nextCursor(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert documents from within the tx + List keys = collection + .insertDocuments(IntStream.range(0, 10).mapToObj(it -> new BaseDocument()).collect(Collectors.toList()), + new DocumentCreateOptions().streamTransactionId(tx.getId())).get().getDocuments().stream() + .map(DocumentEntity::getKey).collect(Collectors.toList()); + + final Map bindVars = new HashMap<>(); + bindVars.put("@collection", COLLECTION_NAME); + bindVars.put("keys", keys); + + ArangoCursorAsync cursor = db + .query("FOR doc IN @@collection FILTER CONTAINS_ARRAY(@keys, doc._key) RETURN doc", BaseDocument.class, bindVars, + new AqlQueryOptions().streamTransactionId(tx.getId()).batchSize(2)).get(); + + List docs = new ArrayList<>(cursor.getResult()); + while (cursor.hasMore()) { + cursor = 
cursor.nextBatch().get(); + docs.addAll(cursor.getResult()); + } + + // assert that all the keys are returned from the query + assertThat(docs.stream().map(BaseDocument::getKey).collect(Collectors.toList())).containsAll(keys); + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void getStreamTransactions(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction(null).get(); + StreamTransactionEntity tx2 = db.beginStreamTransaction(null).get(); + + List createdIds = Arrays.asList(tx1.getId(), tx2.getId()); + Set gotTxs = db.getStreamTransactions().get().stream(). + filter(it -> createdIds.contains(it.getId())).collect(Collectors.toSet()); + + assertThat(gotTxs).hasSameSizeAs(createdIds); + assertThat(gotTxs.stream() + .allMatch(it -> it.getState() == StreamTransactionStatus.running)).isTrue(); + + db.abortStreamTransaction(tx1.getId()).get(); + db.abortStreamTransaction(tx2.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionAllowImplicitFalse(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().allowImplicit(false)).get(); + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null).get(); + + // assert that we cannot read from collection + Throwable thrown = catchThrowable(() -> collection.getDocument(externalDoc.getKey(), BaseDocument.class, + new 
DocumentReadOptions().streamTransactionId(tx.getId())).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(400); + assertThat(e.getErrorNum()).isEqualTo(1652); + assertThat(e.getMessage()).contains("unregistered collection used in transaction"); + + db.abortStreamTransaction(tx.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void transactionDirtyRead(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 10)); + + ArangoCollectionAsync collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity doc = collection.insertDocument(new BaseDocument()).get(); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(COLLECTION_NAME) + .allowDirtyRead(true)).get(); + + MultiDocumentEntity readDocs = collection.getDocuments(Collections.singletonList(doc.getKey()), + BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).get(); + + assertThat(readDocs.isPotentialDirtyRead()).isTrue(); + assertThat(readDocs.getDocuments()).hasSize(1); + + final ArangoCursorAsync cursor = db.query("FOR i IN @@col RETURN i", BaseDocument.class, + Collections.singletonMap("@col", COLLECTION_NAME), + new AqlQueryOptions().streamTransactionId(tx.getId())).get(); + assertThat(cursor.isPotentialDirtyRead()).isTrue(); + + db.abortStreamTransaction(tx.getId()).get(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsAsyncTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsAsyncTest.java new file mode 100644 index 000000000..94e3996df --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsAsyncTest.java @@ -0,0 +1,120 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, 
Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ArangoDBEngine; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.StreamTransactionEntity; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.model.StreamTransactionOptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class StreamTransactionConflictsAsyncTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "db_concurrent_stream_transactions_test-" + UUID.randomUUID(); + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void conflictOnInsertDocumentWithNotYetCommittedTx(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + 
new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())).get(); + + // insert conflicting document from within tx2 + Throwable thrown = catchThrowable(() -> db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new DocumentCreateOptions().streamTransactionId(tx2.getId())).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + + if (isAtLeastVersion(3, 8)) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + db.abortStreamTransaction(tx1.getId()).get(); + db.abortStreamTransaction(tx2.getId()).get(); + } + + @ParameterizedTest + @MethodSource("asyncDbs") + void conflictOnInsertDocumentWithAlreadyCommittedTx(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)).get(); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())).get(); + + // 
commit tx1 + db.commitStreamTransaction(tx1.getId()).get(); + + // insert conflicting document from within tx2 + Throwable thrown = catchThrowable(() -> db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new DocumentCreateOptions().streamTransactionId(tx2.getId())).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + if (isAtLeastVersion(3, 8)) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + db.abortStreamTransaction(tx2.getId()); + } +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsTest.java new file mode 100644 index 000000000..71b4a01ed --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionConflictsTest.java @@ -0,0 +1,119 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ArangoDBEngine; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.StreamTransactionEntity; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.model.StreamTransactionOptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class StreamTransactionConflictsTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "db_concurrent_stream_transactions_test-" + UUID.randomUUID(); + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("dbs") + void conflictOnInsertDocumentWithNotYetCommittedTx(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())); + + // insert conflicting document from within tx2 + Throwable thrown = catchThrowable(() -> db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new 
DocumentCreateOptions().streamTransactionId(tx2.getId()))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + + if (isAtLeastVersion(3, 8)) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + db.abortStreamTransaction(tx1.getId()); + db.abortStreamTransaction(tx2.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void conflictOnInsertDocumentWithAlreadyCommittedTx(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + StreamTransactionEntity tx2 = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + String key = UUID.randomUUID().toString(); + + // insert a document from within tx1 + db.collection(COLLECTION_NAME) + .insertDocument(new BaseDocument(key), new DocumentCreateOptions().streamTransactionId(tx1.getId())); + + // commit tx1 + db.commitStreamTransaction(tx1.getId()); + + // insert conflicting document from within tx2 + Throwable thrown = catchThrowable(() -> db.collection(COLLECTION_NAME).insertDocument(new BaseDocument(key), + new DocumentCreateOptions().streamTransactionId(tx2.getId()))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + if (isAtLeastVersion(3, 8)) { + assertThat(e.getResponseCode()).isEqualTo(409); + assertThat(e.getErrorNum()).isEqualTo(1200); + } + + db.abortStreamTransaction(tx2.getId()); + } +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionGraphAsyncTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionGraphAsyncTest.java new file mode 100644 index 
000000000..b83f453ae --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionGraphAsyncTest.java @@ -0,0 +1,408 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class StreamTransactionGraphAsyncTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "graph_stream_transaction_graph_test"; + private static final String EDGE_COLLECTION = "edge_collection_stream_transaction_graph_test"; + private static final String VERTEX_COLLECTION_1 = "vertex_collection_1_stream_transaction_graph_test"; + private static final String VERTEX_COLLECTION_2 = "vertex_collection_2_stream_transaction_graph_test"; + + private static Stream asyncVertices() { + return asyncDbsStream() + .map(mapNamedPayload(db -> 
db.graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_1))) + .map(Arguments::of); + } + + private static Stream asyncEdges() { + return asyncDbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initDB(); + initGraph(GRAPH_NAME, Collections.singletonList(new EdgeDefinition() + .collection(EDGE_COLLECTION).from(VERTEX_COLLECTION_1).to(VERTEX_COLLECTION_2) + ), null); + } + + private BaseEdgeDocument createEdgeValue(String streamTransactionId, ArangoGraphAsync graph) throws ExecutionException, InterruptedException { + ArangoVertexCollectionAsync vertexCollection1 = graph.vertexCollection(VERTEX_COLLECTION_1); + ArangoVertexCollectionAsync vertexCollection2 = graph.vertexCollection(VERTEX_COLLECTION_2); + VertexEntity v1 = vertexCollection1.insertVertex(new BaseDocument(), + new VertexCreateOptions().streamTransactionId(streamTransactionId)).get(); + VertexEntity v2 = vertexCollection2.insertVertex(new BaseDocument(), + new VertexCreateOptions().streamTransactionId(streamTransactionId)).get(); + BaseEdgeDocument value = new BaseEdgeDocument(); + value.setFrom(v1.getId()); + value.setTo(v2.getId()); + return value; + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void getVertex(ArangoVertexCollectionAsync vertexCollection1) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabaseAsync db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // insert a vertex from outside the tx + VertexEntity createdVertex = vertexCollection1.insertVertex(new 
BaseDocument()).get(); + + // assert that the vertex is not found from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + + @ParameterizedTest + @MethodSource("asyncVertices") + void createVertex(ArangoVertexCollectionAsync vertexCollection1) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabaseAsync db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // insert a vertex from within the tx + VertexEntity createdVertex = vertexCollection1.insertVertex(new BaseDocument(), + new VertexCreateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the vertex is not found from outside the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null).get()).isNull(); + + // assert that the vertex is found from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNotNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the vertex is found after commit + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null).get()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void replaceVertex(ArangoVertexCollectionAsync vertexCollection1) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + VertexEntity createdVertex = vertexCollection1.insertVertex(doc, null).get(); + + ArangoDatabaseAsync db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // replace vertex from within the tx + doc.updateAttribute("test", "bar"); + vertexCollection1.replaceVertex(createdVertex.getKey(), doc, + new VertexReplaceOptions().streamTransactionId(tx.getId())).get(); + + // assert that the vertex has not been replaced from outside the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the vertex has been replaced from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the vertex has been replaced after commit + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void updateVertex(ArangoVertexCollectionAsync vertexCollection1) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + VertexEntity createdDoc = 
vertexCollection1.insertVertex(doc, null).get(); + + ArangoDatabaseAsync db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // update vertex from within the tx + doc.updateAttribute("test", "bar"); + vertexCollection1.updateVertex(createdDoc.getKey(), doc, + new VertexUpdateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the vertex has not been updated from outside the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the vertex has been updated from within the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the vertex has been updated after commit + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("asyncVertices") + void deleteVertex(ArangoVertexCollectionAsync vertexCollection1) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + VertexEntity createdDoc = vertexCollection1.insertVertex(new BaseDocument(), null).get(); + + ArangoDatabaseAsync db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, 
VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // delete vertex from within the tx + vertexCollection1.deleteVertex(createdDoc.getKey(), new VertexDeleteOptions().streamTransactionId(tx.getId())).get(); + + // assert that the vertex has not been deleted from outside the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null).get()).isNotNull(); + + // assert that the vertex has been deleted from within the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the vertex has been deleted after commit + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null).get()).isNull(); + } + + + @ParameterizedTest + @MethodSource("asyncEdges") + void getEdge(ArangoEdgeCollectionAsync edgeCollection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabaseAsync db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // insert an edge from outside the tx + EdgeEntity createdEdge = edgeCollection.insertEdge(createEdgeValue(null, edgeCollection.graph())).get(); + + // assert that the edge is not found from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.abortStreamTransaction(tx.getId()).get(); + } + + + @ParameterizedTest + @MethodSource("asyncEdges") + void createEdge(ArangoEdgeCollectionAsync edgeCollection) 
throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabaseAsync db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // insert an edge from within the tx + EdgeEntity createdEdge = edgeCollection.insertEdge(createEdgeValue(tx.getId(), edgeCollection.graph()), + new EdgeCreateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the edge is not found from outside the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null).get()).isNull(); + + // assert that the edge is found from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNotNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the edge is found after commit + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null).get()).isNotNull(); + } + + @ParameterizedTest + @MethodSource("asyncEdges") + void replaceEdge(ArangoEdgeCollectionAsync edgeCollection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseEdgeDocument doc = createEdgeValue(null, edgeCollection.graph()); + doc.addAttribute("test", "foo"); + + EdgeEntity createdEdge = edgeCollection.insertEdge(doc, null).get(); + + ArangoDatabaseAsync db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, 
VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // replace edge from within the tx + doc.updateAttribute("test", "bar"); + edgeCollection.replaceEdge(createdEdge.getKey(), doc, + new EdgeReplaceOptions().streamTransactionId(tx.getId())).get(); + + // assert that the edge has not been replaced from outside the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the edge has been replaced from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the edge has been replaced after commit + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("asyncEdges") + void updateEdge(ArangoEdgeCollectionAsync edgeCollection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseEdgeDocument doc = createEdgeValue(null, edgeCollection.graph()); + doc.addAttribute("test", "foo"); + + EdgeEntity createdDoc = edgeCollection.insertEdge(doc, null).get(); + + ArangoDatabaseAsync db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // update edge from within the tx + doc.updateAttribute("test", "bar"); + edgeCollection.updateEdge(createdDoc.getKey(), doc, new 
EdgeUpdateOptions().streamTransactionId(tx.getId())).get(); + + // assert that the edge has not been updated from outside the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null).get() + .getProperties()).containsEntry("test", "foo"); + + // assert that the edge has been updated from within the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get().getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the edge has been updated after commit + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null).get() + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("asyncEdges") + void deleteEdge(ArangoEdgeCollectionAsync edgeCollection) throws ExecutionException, InterruptedException { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + EdgeEntity createdDoc = edgeCollection.insertEdge(createEdgeValue(null, edgeCollection.graph()), null).get(); + + ArangoDatabaseAsync db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)).get(); + + // delete edge from within the tx + edgeCollection.deleteEdge(createdDoc.getKey(), new EdgeDeleteOptions().streamTransactionId(tx.getId())).get(); + + // assert that the edge has not been deleted from outside the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null).get()).isNotNull(); + + // assert that the edge has been deleted from within the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, 
+ new GraphDocumentReadOptions().streamTransactionId(tx.getId())).get()).isNull(); + + db.commitStreamTransaction(tx.getId()).get(); + + // assert that the edge has been deleted after commit + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null).get()).isNull(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionGraphTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionGraphTest.java new file mode 100644 index 000000000..d4337788a --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionGraphTest.java @@ -0,0 +1,407 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * @author Michele Rastelli + */ +class StreamTransactionGraphTest extends BaseJunit5 { + + private static final String GRAPH_NAME = "graph_stream_transaction_graph_test"; + private static final String EDGE_COLLECTION = "edge_collection_stream_transaction_graph_test"; + private static final String VERTEX_COLLECTION_1 = "vertex_collection_1_stream_transaction_graph_test"; + private static final String VERTEX_COLLECTION_2 = "vertex_collection_2_stream_transaction_graph_test"; + + private static Stream vertices() { + return dbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_1))) + .map(Arguments::of); + } + + private static Stream edges() { + return dbsStream() + .map(mapNamedPayload(db -> db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION))) + .map(Arguments::of); + } + + @BeforeAll + static void init() { + initDB(); + initGraph(GRAPH_NAME, Collections.singletonList(new EdgeDefinition() + .collection(EDGE_COLLECTION).from(VERTEX_COLLECTION_1).to(VERTEX_COLLECTION_2) + ), null); + } + + private BaseEdgeDocument createEdgeValue(String streamTransactionId, ArangoGraph graph) { + ArangoVertexCollection vertexCollection1 = graph.vertexCollection(VERTEX_COLLECTION_1); + ArangoVertexCollection vertexCollection2 = graph.vertexCollection(VERTEX_COLLECTION_2); + VertexEntity v1 = vertexCollection1.insertVertex(new BaseDocument(), + new 
VertexCreateOptions().streamTransactionId(streamTransactionId)); + VertexEntity v2 = vertexCollection2.insertVertex(new BaseDocument(), + new VertexCreateOptions().streamTransactionId(streamTransactionId)); + BaseEdgeDocument value = new BaseEdgeDocument(); + value.setFrom(v1.getId()); + value.setTo(v2.getId()); + return value; + } + + @ParameterizedTest + @MethodSource("vertices") + void getVertex(ArangoVertexCollection vertexCollection1) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabase db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // insert a vertex from outside the tx + VertexEntity createdVertex = vertexCollection1.insertVertex(new BaseDocument()); + + // assert that the vertex is not found from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.abortStreamTransaction(tx.getId()); + } + + + @ParameterizedTest + @MethodSource("vertices") + void createVertex(ArangoVertexCollection vertexCollection1) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabase db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // insert a vertex from within the tx + VertexEntity createdVertex = vertexCollection1.insertVertex(new BaseDocument(), + new 
VertexCreateOptions().streamTransactionId(tx.getId())); + + // assert that the vertex is not found from outside the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null)).isNull(); + + // assert that the vertex is found from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNotNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the vertex is found after commit + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null)).isNotNull(); + } + + @ParameterizedTest + @MethodSource("vertices") + void replaceVertex(ArangoVertexCollection vertexCollection1) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + VertexEntity createdVertex = vertexCollection1.insertVertex(doc, null); + + ArangoDatabase db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // replace vertex from within the tx + doc.updateAttribute("test", "bar"); + vertexCollection1.replaceVertex(createdVertex.getKey(), doc, + new VertexReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the vertex has not been replaced from outside the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the vertex has been replaced from within the tx + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, + new 
GraphDocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()); + + // assert that the vertex has been replaced after commit + assertThat(vertexCollection1.getVertex(createdVertex.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("vertices") + void updateVertex(ArangoVertexCollection vertexCollection1) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + VertexEntity createdDoc = vertexCollection1.insertVertex(doc, null); + + ArangoDatabase db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // update vertex from within the tx + doc.updateAttribute("test", "bar"); + vertexCollection1.updateVertex(createdDoc.getKey(), doc, + new VertexUpdateOptions().streamTransactionId(tx.getId())); + + // assert that the vertex has not been updated from outside the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the vertex has been updated from within the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()); + + // assert that the vertex has been updated after commit + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null) + 
.getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("vertices") + void deleteVertex(ArangoVertexCollection vertexCollection1) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + VertexEntity createdDoc = vertexCollection1.insertVertex(new BaseDocument(), null); + + ArangoDatabase db = vertexCollection1.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // delete vertex from within the tx + vertexCollection1.deleteVertex(createdDoc.getKey(), new VertexDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the vertex has not been deleted from outside the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null)).isNotNull(); + + // assert that the vertex has been deleted from within the tx + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the vertex has been deleted after commit + assertThat(vertexCollection1.getVertex(createdDoc.getKey(), BaseDocument.class, null)).isNull(); + } + + + @ParameterizedTest + @MethodSource("edges") + void getEdge(ArangoEdgeCollection edgeCollection) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabase db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, 
EDGE_COLLECTION)); + + // insert an edge from outside the tx + EdgeEntity createdEdge = edgeCollection.insertEdge(createEdgeValue(null, edgeCollection.graph())); + + // assert that the edge is not found from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.abortStreamTransaction(tx.getId()); + } + + + @ParameterizedTest + @MethodSource("edges") + void createEdge(ArangoEdgeCollection edgeCollection) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoDatabase db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // insert an edge from within the tx + EdgeEntity createdEdge = edgeCollection.insertEdge(createEdgeValue(tx.getId(), edgeCollection.graph()), + new EdgeCreateOptions().streamTransactionId(tx.getId())); + + // assert that the edge is not found from outside the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null)).isNull(); + + // assert that the edge is found from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNotNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the edge is found after commit + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null)).isNotNull(); + } + + @ParameterizedTest + @MethodSource("edges") + void replaceEdge(ArangoEdgeCollection edgeCollection) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseEdgeDocument doc = createEdgeValue(null, edgeCollection.graph()); + doc.addAttribute("test", "foo"); + + EdgeEntity createdEdge = edgeCollection.insertEdge(doc, null); + + ArangoDatabase db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // replace edge from within the tx + doc.updateAttribute("test", "bar"); + edgeCollection.replaceEdge(createdEdge.getKey(), doc, + new EdgeReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the edge has not been replaced from outside the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the edge has been replaced from within the tx + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()); + + // assert that the edge has been replaced after commit + assertThat(edgeCollection.getEdge(createdEdge.getKey(), BaseEdgeDocument.class, null) + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("edges") + void updateEdge(ArangoEdgeCollection edgeCollection) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseEdgeDocument doc = createEdgeValue(null, edgeCollection.graph()); + doc.addAttribute("test", "foo"); + + EdgeEntity createdDoc = edgeCollection.insertEdge(doc, null); + + ArangoDatabase db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new 
StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // update edge from within the tx + doc.updateAttribute("test", "bar"); + edgeCollection.updateEdge(createdDoc.getKey(), doc, new EdgeUpdateOptions().streamTransactionId(tx.getId())); + + // assert that the edge has not been updated from outside the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the edge has been updated from within the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test" + , "bar"); + + db.commitStreamTransaction(tx.getId()); + + // assert that the edge has been updated after commit + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null) + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("edges") + void deleteEdge(ArangoEdgeCollection edgeCollection) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + EdgeEntity createdDoc = edgeCollection.insertEdge(createEdgeValue(null, edgeCollection.graph()), null); + + ArangoDatabase db = edgeCollection.graph().db(); + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions() + .readCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION) + .writeCollections(VERTEX_COLLECTION_1, VERTEX_COLLECTION_2, EDGE_COLLECTION)); + + // delete edge from within the tx + edgeCollection.deleteEdge(createdDoc.getKey(), new EdgeDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the edge has not been deleted from outside the tx + 
assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null)).isNotNull(); + + // assert that the edge has been deleted from within the tx + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, + new GraphDocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the edge has been deleted after commit + assertThat(edgeCollection.getEdge(createdDoc.getKey(), BaseEdgeDocument.class, null)).isNull(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java new file mode 100644 index 000000000..826a6696f --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java @@ -0,0 +1,820 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.*; +import com.arangodb.model.*; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class StreamTransactionTest extends BaseJunit5 { + + private static final String COLLECTION_NAME = "StreamTransactionTest_collection"; + + @BeforeAll + static void init() { + initCollections(COLLECTION_NAME); + } + + @ParameterizedTest + @MethodSource("dbs") + void beginStreamTransaction(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction(null); + assertThat(tx.getId()).isNotNull(); + assertThat(tx.getStatus()).isEqualTo(StreamTransactionStatus.running); + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void beginStreamTransactionWithNonExistingCollectionsShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> + db.beginStreamTransaction(new StreamTransactionOptions().writeCollections("notExistingCollection"))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void abortStreamTransaction(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null); + StreamTransactionEntity abortedTx = db.abortStreamTransaction(begunTx.getId()); + + assertThat(abortedTx.getId()).isNotNull(); + assertThat(abortedTx.getId()).isEqualTo(begunTx.getId()); + assertThat(abortedTx.getStatus()).isEqualTo(StreamTransactionStatus.aborted); + } + + @ParameterizedTest + @MethodSource("dbs") + void abortStreamTransactionTwice(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity begunTx = db.beginStreamTransaction(null); + db.abortStreamTransaction(begunTx.getId()); + db.abortStreamTransaction(begunTx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void abortStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction("000000")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void abortStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction("invalidTransactionId")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void abortCommittedStreamTransactionShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = 
db.beginStreamTransaction(null); + db.commitStreamTransaction(createdTx.getId()); + Throwable thrown = catchThrowable(() -> db.abortStreamTransaction(createdTx.getId())); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void getStreamTransaction(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + StreamTransactionEntity gotTx = db.getStreamTransaction(createdTx.getId()); + + assertThat(gotTx.getId()).isNotNull(); + assertThat(gotTx.getId()).isEqualTo(createdTx.getId()); + assertThat(gotTx.getStatus()).isEqualTo(StreamTransactionStatus.running); + + db.abortStreamTransaction(createdTx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void getStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.getStreamTransaction("000000")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void getStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.getStreamTransaction("invalidTransactionId")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void commitStreamTransaction(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = 
db.beginStreamTransaction(null); + StreamTransactionEntity committedTx = db.commitStreamTransaction(createdTx.getId()); + + assertThat(committedTx.getId()).isNotNull(); + assertThat(committedTx.getId()).isEqualTo(createdTx.getId()); + assertThat(committedTx.getStatus()).isEqualTo(StreamTransactionStatus.committed); + } + + @ParameterizedTest + @MethodSource("dbs") + void commitStreamTransactionTwice(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + db.commitStreamTransaction(createdTx.getId()); + db.commitStreamTransaction(createdTx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void commitStreamTransactionWhenTransactionIdDoesNotExistsShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.commitStreamTransaction("000000")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void commitStreamTransactionWithInvalidTransactionIdShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + Throwable thrown = catchThrowable(() -> db.commitStreamTransaction("invalidTransactionId")); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void commitAbortedStreamTransactionShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity createdTx = db.beginStreamTransaction(null); + db.abortStreamTransaction(createdTx.getId()); + Throwable 
thrown = catchThrowable(() -> db.commitStreamTransaction(createdTx.getId())); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void getDocument(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null); + + // assert that the document is not found from within the tx + assertThat(collection.getDocument(externalDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void getDocumentWithNonExistingTransactionIdShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + + Throwable thrown = catchThrowable(() -> collection + .getDocument("docId", BaseDocument.class, new DocumentReadOptions().streamTransactionId("123456"))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void getDocumentWithInvalidTransactionIdShouldThrow(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + Throwable thrown = catchThrowable(() -> collection + .getDocument("docId", BaseDocument.class, new 
DocumentReadOptions().streamTransactionId("abcde"))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + } + + @ParameterizedTest + @MethodSource("dbs") + void getDocuments(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert documents from outside the tx + DocumentCreateEntity externalDoc1 = collection + .insertDocument(new BaseDocument(), null); + + DocumentCreateEntity externalDoc2 = collection + .insertDocument(new BaseDocument(), null); + + // assert that the documents are not found from within the tx + MultiDocumentEntity documents = collection + .getDocuments(Arrays.asList(externalDoc1.getId(), externalDoc2.getId()), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())); + + assertThat(documents.getDocuments()).isEmpty(); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void insertDocument(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert a document from within the tx + DocumentCreateEntity txDoc = collection + .insertDocument(new BaseDocument(), new DocumentCreateOptions().streamTransactionId(tx.getId())); + + // assert that the document is not found from outside the tx + assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, null)).isNull(); + + // assert that the document is found from within the tx 
+ assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId()))).isNotNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document is found after commit + assertThat(collection.getDocument(txDoc.getKey(), BaseDocument.class, null)).isNotNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void insertDocuments(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert documents from within the tx + MultiDocumentEntity> txDocs = collection + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument()), + new DocumentCreateOptions().streamTransactionId(tx.getId()), BaseDocument.class); + + List keys = txDocs.getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + // assert that the documents are not found from outside the tx + assertThat(collection.getDocuments(keys, BaseDocument.class, null).getDocuments()).isEmpty(); + + // assert that the documents are found from within the tx + assertThat(collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments()).hasSize(keys.size()); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document is found after commit + assertThat(collection.getDocuments(keys, BaseDocument.class, null).getDocuments()).hasSize(keys.size()); + } + + @ParameterizedTest + @MethodSource("dbs") + void replaceDocument(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection.insertDocument(doc, null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // replace document from within the tx + doc.updateAttribute("test", "bar"); + collection.replaceDocument(createdDoc.getKey(), doc, + new DocumentReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the document has not been replaced from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the document has been replaced from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test", + "bar"); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been replaced after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "bar"); + } + + @ParameterizedTest + @MethodSource("dbs") + void replaceDocuments(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + List createdDocs = collection + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true), BaseDocument.class).getDocuments().stream() + 
.map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + List modifiedDocs = createdDocs.stream() + .peek(doc -> doc.updateAttribute("test", "bar")) + .collect(Collectors.toList()); + + // replace document from within the tx + collection + .replaceDocuments(modifiedDocs, new DocumentReplaceOptions().streamTransactionId(tx.getId())); + + // assert that the documents has not been replaced from outside the tx + collection.getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("foo")); + + // assert that the document has been replaced from within the tx + collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been replaced after commit + collection.getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + } + + @ParameterizedTest + @MethodSource("dbs") + void updateDocument(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + BaseDocument doc = new BaseDocument(UUID.randomUUID().toString()); + doc.addAttribute("test", "foo"); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection.insertDocument(doc, null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + 
new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // update document from within the tx + doc.updateAttribute("test", "bar"); + collection + .updateDocument(createdDoc.getKey(), doc, new DocumentUpdateOptions().streamTransactionId(tx.getId())); + + // assert that the document has not been updated from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "foo"); + + // assert that the document has been updated from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())).getProperties()).containsEntry("test", "bar") + ; + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been updated after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null) + .getProperties()).containsEntry("test", "bar"); + + } + + @ParameterizedTest + @MethodSource("dbs") + void updateDocuments(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + List docs = IntStream.range(0, 3).mapToObj(it -> new BaseDocument()) + .peek(doc -> doc.addAttribute("test", "foo")).collect(Collectors.toList()); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + List createdDocs = collection + .insertDocuments(docs, new DocumentCreateOptions().returnNew(true), BaseDocument.class).getDocuments().stream() + .map(DocumentCreateEntity::getNew).collect(Collectors.toList()); + + List keys = createdDocs.stream().map(BaseDocument::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + List modifiedDocs = createdDocs.stream() + .peek(doc -> 
doc.updateAttribute("test", "bar")) + .collect(Collectors.toList()); + + // update documents from within the tx + collection + .updateDocuments(modifiedDocs, new DocumentUpdateOptions().streamTransactionId(tx.getId())); + + // assert that the documents have not been updated from outside the tx + collection.getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("foo")); + + // assert that the documents have been updated from within the tx + collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments().stream().map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been updated after commit + collection.getDocuments(keys, BaseDocument.class, null).getDocuments().stream() + .map(it -> ((String) it.getAttribute("test"))) + .forEach(it -> assertThat(it).isEqualTo("bar")); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteDocument(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity createdDoc = collection + .insertDocument(new BaseDocument(), null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // delete document from within the tx + collection + .deleteDocument(createdDoc.getKey(), new DocumentDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the document has not been deleted from outside the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null)).isNotNull(); + + // assert that the document has been 
deleted from within the tx + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId()))).isNull(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been deleted after commit + assertThat(collection.getDocument(createdDoc.getKey(), BaseDocument.class, null)).isNull(); + } + + @ParameterizedTest + @MethodSource("dbs") + void deleteDocuments(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + List keys = collection + .insertDocuments(Arrays.asList(new BaseDocument(), new BaseDocument(), new BaseDocument())) + .getDocuments().stream().map(DocumentEntity::getKey).collect(Collectors.toList()); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // delete document from within the tx + collection + .deleteDocuments(keys, new DocumentDeleteOptions().streamTransactionId(tx.getId())); + + // assert that the documents has not been deleted from outside the tx + assertThat(collection.getDocuments(keys, BaseDocument.class, null).getDocuments()).hasSize(keys.size()); + + // assert that the document has been deleted from within the tx + assertThat(collection + .getDocuments(keys, BaseDocument.class, new DocumentReadOptions().streamTransactionId(tx.getId())) + .getDocuments()).isEmpty(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the document has been deleted after commit + assertThat(collection.getDocuments(keys, BaseDocument.class, null).getDocuments()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("dbs") + void documentExists(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + 
assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null); + + // assert that the document is not found from within the tx + assertThat(collection + .documentExists(externalDoc.getKey(), new DocumentExistsOptions().streamTransactionId(tx.getId()))).isFalse(); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void count(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + Long initialCount = collection.count().getCount(); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + + // insert a document from outside the tx + collection.insertDocument(new BaseDocument(), null); + + // assert that the document is not counted from within the tx + assertThat(collection.count(new CollectionCountOptions().streamTransactionId(tx.getId())) + .getCount()).isEqualTo(initialCount); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void truncate(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + collection.insertDocument(new BaseDocument(), null); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + + // truncate document 
from within the tx + collection.truncate(new CollectionTruncateOptions().streamTransactionId(tx.getId())); + + // assert that the collection has not been truncated from outside the tx + assertThat(collection.count().getCount()).isPositive(); + + // assert that the collection has been truncated from within the tx + assertThat(collection.count(new CollectionCountOptions().streamTransactionId(tx.getId())) + .getCount()).isZero(); + + db.commitStreamTransaction(tx.getId()); + + // assert that the collection has been truncated after commit + assertThat(collection.count().getCount()).isZero(); + } + + @ParameterizedTest + @MethodSource("dbs") + void createCursor(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().readCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null); + + final Map bindVars = new HashMap<>(); + bindVars.put("@collection", COLLECTION_NAME); + bindVars.put("key", externalDoc.getKey()); + + ArangoCursor cursor = db + .query("FOR doc IN @@collection FILTER doc._key == @key RETURN doc", BaseDocument.class, bindVars, + new AqlQueryOptions().streamTransactionId(tx.getId())); + + // assert that the document is not found from within the tx + assertThat(cursor.hasNext()).isFalse(); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void nextCursor(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db.beginStreamTransaction( + new 
StreamTransactionOptions().readCollections(COLLECTION_NAME).writeCollections(COLLECTION_NAME)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert documents from within the tx + List keys = collection + .insertDocuments(IntStream.range(0, 10).mapToObj(it -> new BaseDocument()).collect(Collectors.toList()), + new DocumentCreateOptions().streamTransactionId(tx.getId())).getDocuments().stream() + .map(DocumentEntity::getKey).collect(Collectors.toList()); + + final Map bindVars = new HashMap<>(); + bindVars.put("@collection", COLLECTION_NAME); + bindVars.put("keys", keys); + + ArangoCursor cursor = db + .query("FOR doc IN @@collection FILTER CONTAINS_ARRAY(@keys, doc._key) RETURN doc", BaseDocument.class, bindVars, + new AqlQueryOptions().streamTransactionId(tx.getId()).batchSize(2)); + + List docs = cursor.asListRemaining(); + + // assert that all the keys are returned from the query + assertThat(docs.stream().map(BaseDocument::getKey).collect(Collectors.toList())).containsAll(keys); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void getStreamTransactions(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx1 = db.beginStreamTransaction(null); + StreamTransactionEntity tx2 = db.beginStreamTransaction(null); + + List createdIds = Arrays.asList(tx1.getId(), tx2.getId()); + Set gotTxs = db.getStreamTransactions().stream(). 
+ filter(it -> createdIds.contains(it.getId())).collect(Collectors.toSet()); + + assertThat(gotTxs).hasSameSizeAs(createdIds); + assertThat(gotTxs.stream() + .allMatch(it -> it.getState() == StreamTransactionStatus.running)).isTrue(); + + db.abortStreamTransaction(tx1.getId()); + db.abortStreamTransaction(tx2.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionAllowImplicitFalse(ArangoDatabase db) { + assumeTrue(isSingleServer()); + assumeTrue(isAtLeastVersion(3, 5)); + assumeTrue(isStorageEngine(ArangoDBEngine.StorageEngineName.rocksdb)); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions().allowImplicit(false)); + ArangoCollection collection = db.collection(COLLECTION_NAME); + + // insert a document from outside the tx + DocumentCreateEntity externalDoc = collection + .insertDocument(new BaseDocument(), null); + + // assert that we cannot read from collection + Throwable thrown = catchThrowable(() -> collection.getDocument(externalDoc.getKey(), BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId()))); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException e = (ArangoDBException) thrown; + assertThat(e.getResponseCode()).isEqualTo(400); + assertThat(e.getErrorNum()).isEqualTo(1652); + assertThat(e.getMessage()).contains("unregistered collection used in transaction"); + + db.abortStreamTransaction(tx.getId()); + } + + @ParameterizedTest + @MethodSource("dbs") + void transactionDirtyRead(ArangoDatabase db) throws IOException { + assumeTrue(isCluster()); + assumeTrue(isAtLeastVersion(3, 10)); + + ArangoCollection collection = db.collection(COLLECTION_NAME); + DocumentCreateEntity doc = collection.insertDocument(new BaseDocument()); + + StreamTransactionEntity tx = db + .beginStreamTransaction(new StreamTransactionOptions() + .readCollections(COLLECTION_NAME) + .allowDirtyRead(true)); + + MultiDocumentEntity readDocs = 
collection.getDocuments(Collections.singletonList(doc.getKey()), + BaseDocument.class, + new DocumentReadOptions().streamTransactionId(tx.getId())); + + assertThat(readDocs.isPotentialDirtyRead()).isTrue(); + assertThat(readDocs.getDocuments()).hasSize(1); + + final ArangoCursor cursor = db.query("FOR i IN @@col RETURN i", BaseDocument.class, + Collections.singletonMap("@col", COLLECTION_NAME), + new AqlQueryOptions().streamTransactionId(tx.getId())); + assertThat(cursor.isPotentialDirtyRead()).isTrue(); + cursor.close(); + + db.abortStreamTransaction(tx.getId()); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/UserAgentAsyncTest.java b/test-functional/src/test/java/com/arangodb/UserAgentAsyncTest.java new file mode 100644 index 000000000..a1059b86c --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/UserAgentAsyncTest.java @@ -0,0 +1,38 @@ +package com.arangodb; + +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class UserAgentAsyncTest extends BaseJunit5 { + @ParameterizedTest + @EnumSource(Protocol.class) + void userAgentHeader(Protocol protocol) throws ExecutionException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDBAsync adb = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .build() + .async(); + + Response resp = adb.execute(Request.builder() + .method(Request.Method.GET) + .path("/_admin/echo") + .build(), JsonNode.class) + .get(); + String headerValue = resp.getBody().get("headers").get("x-arango-driver").textValue(); + + String jvmVersion = System.getProperty("java.specification.version"); + String expected = 
"JavaDriver/" + PackageVersion.VERSION + " (JVM/" + jvmVersion + ")"; + + assertThat(headerValue).isEqualTo(expected); + adb.shutdown(); + } +} diff --git a/test-functional/src/test/java/com/arangodb/UserAgentTest.java b/test-functional/src/test/java/com/arangodb/UserAgentTest.java new file mode 100644 index 000000000..871e77faf --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/UserAgentTest.java @@ -0,0 +1,49 @@ +package com.arangodb; + +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class UserAgentTest extends BaseJunit5 { + + private static final String EXPECTED_VERSION = "7.22.0"; + + private static final boolean SHADED = Boolean.parseBoolean(System.getProperty("shaded")); + + @Test + void packageVersion() { + assertThat(PackageVersion.VERSION).isEqualTo(EXPECTED_VERSION + (SHADED ? 
"-shaded" : "")); + } + + @Test + void packageVersionIsShaded() { + assertThat(PackageVersion.SHADED).isEqualTo(SHADED); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void userAgentHeader(Protocol protocol) { + assumeTrue(!protocol.equals(Protocol.VST) || BaseJunit5.isLessThanVersion(3, 12)); + + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(config) + .protocol(protocol) + .build(); + + Response resp = adb.execute(Request.builder() + .method(Request.Method.GET) + .path("/_admin/echo") + .build(), JsonNode.class); + String headerValue = resp.getBody().get("headers").get("x-arango-driver").textValue(); + + String jvmVersion = System.getProperty("java.specification.version"); + String expected = "JavaDriver/" + PackageVersion.VERSION + " (JVM/" + jvmVersion + ")"; + + assertThat(headerValue).isEqualTo(expected); + adb.shutdown(); + } +} diff --git a/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java b/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java new file mode 100644 index 000000000..dcef7a620 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java @@ -0,0 +1,23 @@ +package com.arangodb.config; + +import java.util.Properties; + +public class ConfigUtils { + + public static ArangoConfigProperties loadConfig() { + return ArangoConfigProperties.fromFile(); + } + + public static ArangoConfigProperties loadConfig(final String location) { + return ArangoConfigProperties.fromFile(location); + } + + public static ArangoConfigProperties loadConfig(final String location, final String prefix) { + return ArangoConfigProperties.fromFile(location, prefix); + } + + public static ArangoConfigProperties loadConfig(final Properties properties, final String prefix) { + return ArangoConfigProperties.fromProperties(properties, prefix); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java 
b/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java new file mode 100644 index 000000000..109a9eb5e --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java @@ -0,0 +1,174 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDBMultipleException; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.net.*; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.fail; + +/** + * @author Mark Vollmary + */ +class HostHandlerTest { + + private static final ConnectionPool mockCP = new ConnectionPool() { + @Override + public Connection createConnection() { + return null; + } + + @Override + public CompletableFuture connection() { + return null; + } + + @Override + public void release(Connection connection) { + + } + + @Override + public void setJwt(String jwt) { + + } + + @Override + public void close() { + + } + }; + + private static final Host HOST_0 = new HostImpl(mockCP, new HostDescription("172.28.0.1", 8529)); + private static final HostResolver 
SINGLE_HOST = () -> { + HostSet set = new HostSet(Collections.emptyList()); + set.addHost(HOST_0); + return set; + }; + private static final Host HOST_1 = new HostImpl(mockCP, new HostDescription("127.0.0.2", 8529)); + private static final Host HOST_2 = new HostImpl(mockCP, new HostDescription("127.0.0.3", 8529)); + private static final HostResolver MULTIPLE_HOSTS = () -> { + HostSet set = new HostSet(Collections.emptyList()); + set.addHost(HOST_0); + set.addHost(HOST_1); + set.addHost(HOST_2); + return set; + }; + + @Test + void fallbackHostHandlerSingleHost() { + final HostHandler handler = new FallbackHostHandler(SINGLE_HOST); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + handler.fail(new RuntimeException()); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + } + + @Test + void fallbackHostHandlerMultipleHosts() { + final HostHandler handler = new FallbackHostHandler(MULTIPLE_HOSTS); + for (int i = 0; i < 3; i++) { + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + handler.fail(new RuntimeException("HOST_0 failed")); + assertThat(handler.get(null, null)).isEqualTo(HOST_1); + handler.fail(new RuntimeException("HOST_1 failed")); + assertThat(handler.get(null, null)).isEqualTo(HOST_2); + handler.fail(new RuntimeException("HOST_2 failed")); + if (i < 2) { + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + } else { + try { + handler.get(null, null); + fail(); + } catch (ArangoDBException e) { + assertThat(e.getCause()).isNotNull(); + assertThat(e.getCause()).isInstanceOf(ArangoDBMultipleException.class); + List exceptions = ((ArangoDBMultipleException) e.getCause()).getExceptions(); + assertThat(exceptions.get(0)).isInstanceOf(RuntimeException.class); + assertThat(exceptions.get(0).getMessage()).isEqualTo("HOST_0 failed"); + assertThat(exceptions.get(1)).isInstanceOf(RuntimeException.class); + assertThat(exceptions.get(1).getMessage()).isEqualTo("HOST_1 failed"); + 
assertThat(exceptions.get(2)).isInstanceOf(RuntimeException.class); + assertThat(exceptions.get(2).getMessage()).isEqualTo("HOST_2 failed"); + } + } + } + } + + @Test + void randomHostHandlerSingleHost() { + final HostHandler handler = new RandomHostHandler(SINGLE_HOST, new FallbackHostHandler(SINGLE_HOST)); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + handler.fail(new RuntimeException()); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + } + + @Test + void randomHostHandlerMultipleHosts() { + final HostHandler handler = new RandomHostHandler(MULTIPLE_HOSTS, new FallbackHostHandler(MULTIPLE_HOSTS)); + + final Host pick0 = handler.get(null, null); + assertThat(pick0).isIn(HOST_0, HOST_1, HOST_2); + handler.fail(new RuntimeException()); + + final Host pick1 = handler.get(null, null); + assertThat(pick1).isIn(HOST_0, HOST_1, HOST_2); + handler.success(); + + final Host pick3 = handler.get(null, null); + assertThat(pick3) + .isIn(HOST_0, HOST_1, HOST_2) + .isEqualTo(pick1); + } + + @Test + void roundRobinHostHandlerSingleHost() { + final HostHandler handler = new RoundRobinHostHandler(SINGLE_HOST); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + handler.fail(new RuntimeException()); + assertThat(handler.get(null, null)).isEqualTo(HOST_0); + } + + @Test + void roundRobinHostHandlerMultipleHosts() { + final HostHandler handler = new RoundRobinHostHandler(MULTIPLE_HOSTS); + final Host pick0 = handler.get(null, null); + assertThat(pick0).isIn(HOST_0, HOST_1, HOST_2); + final Host pick1 = handler.get(null, null); + assertThat(pick1) + .isIn(HOST_0, HOST_1, HOST_2) + .isNotEqualTo(pick0); + final Host pick2 = handler.get(null, null); + assertThat(pick2) + .isIn(HOST_0, HOST_1, HOST_2) + .isNotIn(pick0, pick1); + final Host pick4 = handler.get(null, null); + assertThat(pick4).isEqualTo(pick0); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/internal/QueueTimeMetricsImplTest.java 
b/test-functional/src/test/java/com/arangodb/internal/QueueTimeMetricsImplTest.java new file mode 100644 index 000000000..7c4ca4c8a --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/internal/QueueTimeMetricsImplTest.java @@ -0,0 +1,61 @@ +package com.arangodb.internal; + +import com.arangodb.model.QueueTimeSample; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.within; + + +class QueueTimeMetricsImplTest { + private final static int QSIZE = 1024; + private final Random rnd = new Random(); + private final QueueTimeMetricsImpl q = new QueueTimeMetricsImpl(QSIZE); + + @Test + void halfSizeTest() { + testQueue(QSIZE / 2); + } + + @Test + void fullSizeTest() { + testQueue(QSIZE); + } + + @Test + void emptySizeTest() { + testQueue(0); + } + + @Test + void overSizeTest() { + testQueue((int) (QSIZE * 1.2)); + testQueue((int) (QSIZE * 3000.4)); + } + + private void testQueue(int size) { + q.clear(); + for (int i = 0; i < size; i++) { + q.add(new QueueTimeSample(i, rnd.nextDouble())); + } + QueueTimeSample[] values = q.getValues(); + assertThat(values).hasSize(Math.min(size, QSIZE)); + assertThat(q.getAvg()).isEqualTo(getAvg(values), within(1.0E-12)); + assertThat(q.getAvg()).isGreaterThanOrEqualTo(0.0); + + for (int i = 0; i < values.length; i++) { + assertThat(values[i]).isNotNull(); + if (i > 0) { + assertThat(values[i].timestamp).isGreaterThan(values[i - 1].timestamp); + } + } + } + + private double getAvg(QueueTimeSample[] elements) { + return Arrays.stream(elements).mapToDouble(it -> it.value).average().orElse(0.0); + } + +} \ No newline at end of file diff --git a/test-functional/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java b/test-functional/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java new file mode 100644 index 000000000..5219179fc --- /dev/null +++ 
b/test-functional/src/test/java/com/arangodb/internal/velocystream/CommunicationTest.java @@ -0,0 +1,116 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.internal.velocystream; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDatabase; +import com.arangodb.config.ConfigUtils; +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.api.Test; + +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.ConcurrentLinkedQueue; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Mark Vollmary + */ +class CommunicationTest { + + private static final String FAST = "fast"; + private static final String SLOW = "slow"; + + @Test + void chunkSizeSmall() { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .chunkSize(20).build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @SlowTest + @Test + void multiThread() throws Exception { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .build(); + arangoDB.getUsers(); // authentication and active-failover connection redirect to master + + final Collection result = new ConcurrentLinkedQueue<>(); + final 
Thread fast = new Thread(() -> { + arangoDB.db().query("return sleep(0.1)", null, null, null); + result.add(FAST); + }); + final Thread slow = new Thread(() -> { + arangoDB.db().query("return sleep(0.5)", null, null, null); + result.add(SLOW); + }); + slow.start(); + fast.start(); + + slow.join(); + fast.join(); + + assertThat(result.size()).isEqualTo(2); + final Iterator iterator = result.iterator(); + assertThat(iterator.next()).isEqualTo(FAST); + assertThat(iterator.next()).isEqualTo(SLOW); + } + + @SlowTest + @Test + void multiThreadSameDatabases() throws Exception { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .build(); + arangoDB.getUsers(); // authentication and active-failover connection redirect to master + + final ArangoDatabase db = arangoDB.db(); + + final Collection result = new ConcurrentLinkedQueue<>(); + final Thread t1 = new Thread(() -> { + db.query("return sleep(0.1)", null, null, null); + result.add("1"); + }); + final Thread t2 = new Thread(() -> { + db.query("return sleep(0.1)", null, null, null); + result.add("1"); + }); + t2.start(); + t1.start(); + t2.join(); + t1.join(); + assertThat(result.size()).isEqualTo(2); + } + + @Test + void defaultMaxConnection() { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .maxConnections(null).build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } +} diff --git a/test-functional/src/test/java/com/arangodb/mapping/annotations/AnnotatedEntity.java b/test-functional/src/test/java/com/arangodb/mapping/annotations/AnnotatedEntity.java new file mode 100644 index 000000000..f26fb6f25 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/mapping/annotations/AnnotatedEntity.java @@ -0,0 +1,81 @@ +package com.arangodb.mapping.annotations; + +import com.arangodb.serde.jackson.*; + +import java.util.Objects; + +public class AnnotatedEntity { + + @Id + private 
String id; + + @Key + private String key; + + @Rev + private String rev; + + @From + private String from; + + @To + private String to; + + public AnnotatedEntity() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getRev() { + return rev; + } + + public void setRev(String rev) { + this.rev = rev; + } + + public String getFrom() { + return from; + } + + public void setFrom(String from) { + this.from = from; + } + + public String getTo() { + return to; + } + + public void setTo(String to) { + this.to = to; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnnotatedEntity that = (AnnotatedEntity) o; + return Objects.equals(getId(), that.getId()) && Objects.equals(getKey(), that.getKey()) && Objects + .equals(getRev(), that.getRev()) && Objects.equals(getFrom(), that.getFrom()) && Objects + .equals(getTo(), that.getTo()); + } + + @Override + public int hashCode() { + return Objects.hash(getId(), getKey(), getRev(), getFrom(), getTo()); + } +} diff --git a/test-functional/src/test/java/com/arangodb/mapping/annotations/ArangoAnnotationsTest.java b/test-functional/src/test/java/com/arangodb/mapping/annotations/ArangoAnnotationsTest.java new file mode 100644 index 000000000..218ba79ae --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/mapping/annotations/ArangoAnnotationsTest.java @@ -0,0 +1,64 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.mapping.annotations; + +import com.arangodb.ContentType; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.jackson.JacksonSerde; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * @author Michele Rastelli + */ +class ArangoAnnotationsTest { + + @ParameterizedTest + @EnumSource(ContentType.class) + void documentFieldAnnotations(ContentType contentType) { + ArangoSerde mapper = JacksonSerde.of(contentType); + + AnnotatedEntity e = new AnnotatedEntity(); + e.setId("Id"); + e.setKey("Key"); + e.setRev("Rev"); + e.setFrom("From"); + e.setTo("To"); + + byte[] serialized = mapper.serialize(e); + Map deserialized = mapper.deserialize(serialized, Map.class); + assertThat(deserialized) + .containsEntry("_id", e.getId()) + .containsEntry("_key", e.getKey()) + .containsEntry("_rev", e.getRev()) + .containsEntry("_from", e.getFrom()) + .containsEntry("_to", e.getTo()) + .hasSize(5); + + AnnotatedEntity deserializedEntity = mapper.deserialize(serialized, AnnotatedEntity.class); + assertThat(deserializedEntity).isEqualTo(e); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java b/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java new file mode 100644 index 000000000..c779464a1 --- /dev/null +++ 
b/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java @@ -0,0 +1,33 @@ +package com.arangodb.model; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +class AqlQueryOptionsTest { + + @Test + void cloneable() { + List rules = Arrays.asList("foo", "bar"); + AqlQueryOptions options = new AqlQueryOptions() + .cache(true) + .stream(true) + .usePlanCache(true) + .rules(rules) + .shardIds("a", "b"); + AqlQueryOptions clone = options.clone(); + assertThat(clone.getCache()).isEqualTo(options.getCache()); + assertThat(clone.getStream()).isEqualTo(options.getStream()); + assertThat(clone.getUsePlanCache()).isEqualTo(options.getUsePlanCache()); + assertThat(clone.getRules()) + .isEqualTo(options.getRules()) + .isNotSameAs(options.getRules()); + assertThat(clone.getShardIds()) + .isEqualTo(options.getShardIds()) + .isNotSameAs(options.getShardIds()); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java new file mode 100644 index 000000000..0f305e97c --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java @@ -0,0 +1,246 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.serde; + + +import com.arangodb.*; +import com.arangodb.config.ConfigUtils; +import com.arangodb.internal.RequestContextHolder; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.serde.jackson.JacksonSerde; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.*; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.module.SimpleModule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import static com.fasterxml.jackson.databind.DeserializationFeature.USE_BIG_INTEGER_FOR_INTS; +import static com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED; +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * NB: excluded from shaded tests + */ +class CustomSerdeAsyncTest { + + private static final String COLLECTION_NAME = "collection"; + private static final String PERSON_SERIALIZER_ADDED_PREFIX = "MyNameIs"; + private static final String PERSON_DESERIALIZER_ADDED_PREFIX = "Hello"; + + private static ArangoDBAsync arangoDB; + private static ArangoDatabaseAsync db; + private static ArangoCollectionAsync collection; + + @BeforeAll + static void init() throws ExecutionException, InterruptedException { + JacksonSerde serde = JacksonSerde.of(ContentType.JSON) + .configure((mapper) -> { + mapper.configure(WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED, true); + mapper.configure(USE_BIG_INTEGER_FOR_INTS, true); + SimpleModule module = new SimpleModule("PersonModule"); + 
module.addDeserializer(Person.class, new PersonDeserializer()); + mapper.registerModule(module); + }); + arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .serde(serde) + .protocol(Protocol.HTTP_JSON) + .build() + .async(); + + db = arangoDB.db("custom-serde-test"); + if (!db.exists().get()) { + db.create().get(); + } + + collection = db.collection(COLLECTION_NAME); + if (!collection.exists().get()) { + collection.create().get(); + } + } + + @AfterAll + static void shutdown() throws ExecutionException, InterruptedException { + if (db.exists().get()) + db.drop().get(); + } + + @Test + void customPersonDeserializer() throws ExecutionException, InterruptedException { + Person person = new Person(); + person.name = "Joe"; + Person result = collection.insertDocument( + person, + new DocumentCreateOptions().returnNew(true) + ).get().getNew(); + assertThat(result.name).isEqualTo(PERSON_DESERIALIZER_ADDED_PREFIX + PERSON_SERIALIZER_ADDED_PREFIX + person.name); + } + + @Test + void manualCustomPersonDeserializer() { + Person person = new Person(); + person.name = "Joe"; + InternalSerde serialization = arangoDB.getSerde(); + byte[] serialized = serialization.serializeUserData(person); + Person deserializedPerson = RequestContextHolder.INSTANCE.runWithCtx(RequestContext.EMPTY, () -> + serialization.deserializeUserData(serialized, Person.class)); + assertThat(deserializedPerson.name).isEqualTo(PERSON_DESERIALIZER_ADDED_PREFIX + PERSON_SERIALIZER_ADDED_PREFIX + person.name); + } + + @Test + void aqlSerialization() throws ExecutionException, InterruptedException { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + HashMap params = new HashMap<>(); + params.put("doc", doc); + params.put("@collection", COLLECTION_NAME); + + Map result = db.query( + "INSERT @doc INTO @@collection RETURN NEW", + Map.class, + params + 
).get().getResult().get(0); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void aqlDeserialization() throws ExecutionException, InterruptedException { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + collection.insertDocument(doc).get(); + + final Map result = db.query( + "RETURN DOCUMENT(@docId)", + Map.class, + Collections.singletonMap("docId", COLLECTION_NAME + "/" + key) + ).get().getResult().get(0); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void insertDocument() throws ExecutionException, InterruptedException { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + Map result = collection.insertDocument( + doc, + new DocumentCreateOptions().returnNew(true) + ).get().getNew(); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void getDocument() throws ExecutionException, InterruptedException { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + collection.insertDocument(doc).get(); + + final Map result = db.collection(COLLECTION_NAME).getDocument( + key, + 
Map.class, + null).get(); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void parseNullString() { + final String json = RequestContextHolder.INSTANCE.runWithCtx(RequestContext.EMPTY, () -> + arangoDB.getSerde().deserializeUserData(arangoDB.getSerde().serializeUserData(null), String.class)); + assertThat(json).isNull(); + } + + static class PersonSerializer extends JsonSerializer { + @Override + public void serialize(Person value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeStartObject(); + gen.writeFieldName("name"); + gen.writeString(PERSON_SERIALIZER_ADDED_PREFIX + value.name); + gen.writeEndObject(); + } + } + + static class PersonDeserializer extends JsonDeserializer { + @Override + public Person deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { + Person person = new Person(); + JsonNode rootNode = parser.getCodec().readTree(parser); + JsonNode nameNode = rootNode.get("name"); + if (nameNode != null && nameNode.isTextual()) { + person.name = PERSON_DESERIALIZER_ADDED_PREFIX + nameNode.asText(); + } + return person; + } + } + + @JsonSerialize(using = PersonSerializer.class) + static class Person { + String name; + } + +} diff --git a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java new file mode 100644 index 000000000..58a736f6f --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java @@ -0,0 +1,243 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.serde; + + +import com.arangodb.*; +import com.arangodb.config.ConfigUtils; +import com.arangodb.internal.RequestContextHolder; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.serde.jackson.JacksonSerde; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.*; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.module.SimpleModule; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static com.fasterxml.jackson.databind.DeserializationFeature.USE_BIG_INTEGER_FOR_INTS; +import static com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED; +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * NB: excluded from shaded tests + */ +class CustomSerdeTest { + + private static final String COLLECTION_NAME = "collection"; + private static final String PERSON_SERIALIZER_ADDED_PREFIX = "MyNameIs"; + private static final String PERSON_DESERIALIZER_ADDED_PREFIX = "Hello"; + + private static ArangoDB arangoDB; + private static ArangoDatabase db; + private static ArangoCollection 
collection; + + @BeforeAll + static void init() { + JacksonSerde serde = JacksonSerde.of(ContentType.VPACK) + .configure((mapper) -> { + mapper.configure(WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED, true); + mapper.configure(USE_BIG_INTEGER_FOR_INTS, true); + SimpleModule module = new SimpleModule("PersonModule"); + module.addDeserializer(Person.class, new PersonDeserializer()); + mapper.registerModule(module); + }); + arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .protocol(Protocol.HTTP_VPACK) + .serde(serde).build(); + + db = arangoDB.db("custom-serde-test"); + if (!db.exists()) { + db.create(); + } + + collection = db.collection(COLLECTION_NAME); + if (!collection.exists()) { + collection.create(); + } + } + + @AfterAll + static void shutdown() { + if (db.exists()) + db.drop(); + } + + @Test + void customPersonDeserializer() { + Person person = new Person(); + person.name = "Joe"; + Person result = collection.insertDocument( + person, + new DocumentCreateOptions().returnNew(true) + ).getNew(); + assertThat(result.name).isEqualTo(PERSON_DESERIALIZER_ADDED_PREFIX + PERSON_SERIALIZER_ADDED_PREFIX + person.name); + } + + @Test + void manualCustomPersonDeserializer() { + Person person = new Person(); + person.name = "Joe"; + InternalSerde serialization = arangoDB.getSerde(); + byte[] serialized = serialization.serializeUserData(person); + Person deserializedPerson = RequestContextHolder.INSTANCE.runWithCtx(RequestContext.EMPTY, () -> + serialization.deserializeUserData(serialized, Person.class)); + assertThat(deserializedPerson.name).isEqualTo(PERSON_DESERIALIZER_ADDED_PREFIX + PERSON_SERIALIZER_ADDED_PREFIX + person.name); + } + + @Test + void aqlSerialization() { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + HashMap params = new HashMap<>(); + params.put("doc", doc); + params.put("@collection", 
COLLECTION_NAME); + + Map result = db.query( + "INSERT @doc INTO @@collection RETURN NEW", + Map.class, + params + ).next(); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void aqlDeserialization() { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + collection.insertDocument(doc); + + final Map result = db.query( + "RETURN DOCUMENT(@docId)", + Map.class, + Collections.singletonMap("docId", COLLECTION_NAME + "/" + key) + ).next(); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void insertDocument() { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + Map result = collection.insertDocument( + doc, + new DocumentCreateOptions().returnNew(true) + ).getNew(); + + assertThat(result.get("arr")).isInstanceOf(String.class); + assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void getDocument() { + String key = "test-" + UUID.randomUUID(); + + Map doc = new HashMap<>(); + doc.put("_key", key); + doc.put("arr", Collections.singletonList("hello")); + doc.put("int", 10); + + collection.insertDocument(doc); + + final Map result = db.collection(COLLECTION_NAME).getDocument( + key, + Map.class, + null); + + assertThat(result.get("arr")).isInstanceOf(String.class); 
+ assertThat(result.get("arr")).isEqualTo("hello"); + assertThat(result.get("int")).isInstanceOf(BigInteger.class); + assertThat(result.get("int")).isEqualTo(BigInteger.valueOf(10)); + } + + @Test + void parseNullString() { + final String json = RequestContextHolder.INSTANCE.runWithCtx(RequestContext.EMPTY, () -> + arangoDB.getSerde().deserializeUserData(arangoDB.getSerde().serializeUserData(null), String.class)); + assertThat(json).isNull(); + } + + static class PersonSerializer extends JsonSerializer { + @Override + public void serialize(Person value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeStartObject(); + gen.writeFieldName("name"); + gen.writeString(PERSON_SERIALIZER_ADDED_PREFIX + value.name); + gen.writeEndObject(); + } + } + + static class PersonDeserializer extends JsonDeserializer { + @Override + public Person deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { + Person person = new Person(); + JsonNode rootNode = parser.getCodec().readTree(parser); + JsonNode nameNode = rootNode.get("name"); + if (nameNode != null && nameNode.isTextual()) { + person.name = PERSON_DESERIALIZER_ADDED_PREFIX + nameNode.asText(); + } + return person; + } + } + + @JsonSerialize(using = PersonSerializer.class) + static class Person { + String name; + } + +} diff --git a/test-functional/src/test/java/com/arangodb/serde/CustomTypeHintTest.java b/test-functional/src/test/java/com/arangodb/serde/CustomTypeHintTest.java new file mode 100644 index 000000000..06d0bfc35 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/CustomTypeHintTest.java @@ -0,0 +1,144 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.serde; + + +import com.arangodb.ArangoCollection; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDatabase; +import com.arangodb.config.ConfigUtils; +import com.arangodb.model.DocumentCreateOptions; +import com.arangodb.serde.jackson.Key; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Michele Rastelli + */ +class CustomTypeHintTest { + + private static final String COLLECTION_NAME = "collection"; + private ArangoDatabase db; + private ArangoCollection collection; + + @BeforeEach + void init() { + ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ConfigUtils.loadConfig()) + .build(); + + db = arangoDB.db("custom-serde-test"); + if (!db.exists()) { + db.create(); + } + + collection = db.collection(COLLECTION_NAME); + if (!collection.exists()) { + collection.create(); + } + } + + @AfterEach + void shutdown() { + if (db.exists()) + db.drop(); + } + + @Test + void insertDocument() { + Gorilla gorilla = new Gorilla(); + gorilla.setName("kingKong"); + + Zoo doc = new Zoo(); + doc.setAnimal(gorilla); + + Zoo insertedDoc = collection.insertDocument( + doc, + new DocumentCreateOptions().returnNew(true) + ).getNew(); + + assertThat((insertedDoc.getAnimal().getName())).isEqualTo("kingKong"); + + String key = insertedDoc.getKey(); + + // in the db 
a document like this is created: + // { + // "animal": { + // "type": "com.arangodb.serde.CustomTypeHintTest$Gorilla", + // "name": "kingKong" + // } + // } + + final Zoo readDoc = collection.getDocument( + key, + Zoo.class, + null); + + assertThat((readDoc.getAnimal().getName())).isEqualTo("kingKong"); + } + + @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "type") + public interface Animal { + String getName(); + } + + public static class Gorilla implements Animal { + private String name; + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class Zoo { + + @Key + private String key; + + private Animal animal; + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public Animal getAnimal() { + return animal; + } + + public void setAnimal(Animal animal) { + this.animal = animal; + } + } +} diff --git a/test-functional/src/test/java/com/arangodb/serde/JacksonConfigurationTest.java b/test-functional/src/test/java/com/arangodb/serde/JacksonConfigurationTest.java new file mode 100644 index 000000000..1a0f82122 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/JacksonConfigurationTest.java @@ -0,0 +1,50 @@ +package com.arangodb.serde; + +import com.arangodb.ContentType; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.arangodb.serde.jackson.JacksonSerde; +import com.arangodb.util.SlowTest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class JacksonConfigurationTest { + + @SlowTest + @ParameterizedTest + @EnumSource(ContentType.class) + void bigStringInternalSerde(ContentType type) { + ArangoSerde s = new InternalSerdeProvider(type).create(); + + StringBuilder sb = new StringBuilder(); + while (sb.length() < 40_000_000) { + 
sb.append(UUID.randomUUID()); + } + String in = sb.toString(); + byte[] bytes = s.serialize(in); + String out = s.deserialize(bytes, String.class); + assertThat(out).isEqualTo(in); + } + + @SlowTest + @ParameterizedTest + @EnumSource(ContentType.class) + void bigStringUserSerde(ContentType type) { + ArangoSerde s = JacksonSerde.of(type); + + StringBuilder sb = new StringBuilder(); + while (sb.length() < 40_000_000) { + sb.append(UUID.randomUUID()); + } + String in = sb.toString(); + byte[] bytes = s.serialize(in); + String out = s.deserialize(bytes, String.class); + assertThat(out).isEqualTo(in); + } + + + +} diff --git a/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java b/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java new file mode 100644 index 000000000..d5a3a969c --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java @@ -0,0 +1,240 @@ +package com.arangodb.serde; + +import com.arangodb.serde.jackson.*; +import com.arangodb.serde.jackson.json.JacksonJsonSerdeProvider; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.UUID; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * NB: excluded from shaded tests + */ +class JacksonInterferenceTest { + + private final ObjectMapper mapper = new ObjectMapper(); + private final ArangoSerde serde = new JacksonJsonSerdeProvider().create(); + + private FooField fooField; + private FooProp fooProp; + + static class FooField { + @Id + public String myId; + @Key + public String myKey; + @Rev + public String myRev; + @From + public String myFrom; 
+ @To + public String myTo; + } + + static class FooProp { + public String myId; + public String myKey; + public String myRev; + public String myFrom; + public String myTo; + + @Id + public String getMyId() { + return myId; + } + + @Id + public void setMyId(String myId) { + this.myId = myId; + } + + @Key + public String getMyKey() { + return myKey; + } + + @Key + public void setMyKey(String myKey) { + this.myKey = myKey; + } + + @Rev + public String getMyRev() { + return myRev; + } + + @Rev + public void setMyRev(String myRev) { + this.myRev = myRev; + } + + @From + public String getMyFrom() { + return myFrom; + } + + @From + public void setMyFrom(String myFrom) { + this.myFrom = myFrom; + } + + @To + public String getMyTo() { + return myTo; + } + + @To + public void setMyTo(String myTo) { + this.myTo = myTo; + } + } + + @BeforeEach + void init() { + fooField = new FooField(); + fooProp = new FooProp(); + + fooField.myId = "myId"; + fooProp.myId = "myId"; + + fooField.myKey = "myKey"; + fooProp.myKey = "myKey"; + + fooField.myRev = "myRev"; + fooProp.myRev = "myRev"; + + fooField.myFrom = "myFrom"; + fooProp.myFrom = "myFrom"; + + fooField.myTo = "myTo"; + fooProp.myTo = "myTo"; + } + + @Test + void serializeField() { + // id + testSerialize(fooField, "myId", fooField.myId, this::jacksonSerialize); + testSerialize(fooField, "_id", fooField.myId, this::serdeSerialize); + // key + testSerialize(fooField, "myKey", fooField.myKey, this::jacksonSerialize); + testSerialize(fooField, "_key", fooField.myKey, this::serdeSerialize); + // rev + testSerialize(fooField, "myRev", fooField.myRev, this::jacksonSerialize); + testSerialize(fooField, "_rev", fooField.myRev, this::serdeSerialize); + // from + testSerialize(fooField, "myFrom", fooField.myFrom, this::jacksonSerialize); + testSerialize(fooField, "_from", fooField.myFrom, this::serdeSerialize); + // to + testSerialize(fooField, "myTo", fooField.myTo, this::jacksonSerialize); + testSerialize(fooField, "_to", fooField.myTo, 
this::serdeSerialize); + } + + @Test + void serializeProp() { + // id + testSerialize(fooProp, "myId", fooProp.myId, this::jacksonSerialize); + testSerialize(fooProp, "_id", fooProp.myId, this::serdeSerialize); + // key + testSerialize(fooProp, "myKey", fooProp.myKey, this::jacksonSerialize); + testSerialize(fooProp, "_key", fooProp.myKey, this::serdeSerialize); + // rev + testSerialize(fooProp, "myRev", fooProp.myRev, this::jacksonSerialize); + testSerialize(fooProp, "_rev", fooProp.myRev, this::serdeSerialize); + // from + testSerialize(fooProp, "myFrom", fooProp.myFrom, this::jacksonSerialize); + testSerialize(fooProp, "_from", fooProp.myFrom, this::serdeSerialize); + // to + testSerialize(fooProp, "myTo", fooProp.myTo, this::jacksonSerialize); + testSerialize(fooProp, "_to", fooProp.myTo, this::serdeSerialize); + } + + @Test + void deserializeField() throws IOException { + // id + testDeserialize("myId", FooField.class, foo -> foo.myId, this::jacksonDeserialize); + testDeserialize("_id", FooField.class, foo -> foo.myId, this::serdeDeserialize); + // key + testDeserialize("myKey", FooField.class, foo -> foo.myKey, this::jacksonDeserialize); + testDeserialize("_key", FooField.class, foo -> foo.myKey, this::serdeDeserialize); + // rev + testDeserialize("myRev", FooField.class, foo -> foo.myRev, this::jacksonDeserialize); + testDeserialize("_rev", FooField.class, foo -> foo.myRev, this::serdeDeserialize); + // from + testDeserialize("myFrom", FooField.class, foo -> foo.myFrom, this::jacksonDeserialize); + testDeserialize("_from", FooField.class, foo -> foo.myFrom, this::serdeDeserialize); + // to + testDeserialize("myTo", FooField.class, foo -> foo.myTo, this::jacksonDeserialize); + testDeserialize("_to", FooField.class, foo -> foo.myTo, this::serdeDeserialize); + } + + @Test + void deserializeProp() throws IOException { + // id + testDeserialize("myId", FooProp.class, FooProp::getMyId, this::jacksonDeserialize); + testDeserialize("_id", FooProp.class, 
FooProp::getMyId, this::serdeDeserialize); + // key + testDeserialize("myKey", FooProp.class, FooProp::getMyKey, this::jacksonDeserialize); + testDeserialize("_key", FooProp.class, FooProp::getMyKey, this::serdeDeserialize); + // rev + testDeserialize("myRev", FooProp.class, FooProp::getMyRev, this::jacksonDeserialize); + testDeserialize("_rev", FooProp.class, FooProp::getMyRev, this::serdeDeserialize); + // from + testDeserialize("myFrom", FooProp.class, FooProp::getMyFrom, this::jacksonDeserialize); + testDeserialize("_from", FooProp.class, FooProp::getMyFrom, this::serdeDeserialize); + // to + testDeserialize("myTo", FooProp.class, FooProp::getMyTo, this::jacksonDeserialize); + testDeserialize("_to", FooProp.class, FooProp::getMyTo, this::serdeDeserialize); + } + + void testSerialize(Object data, String fieldName, String expectedValue, Function serializer) { + JsonNode jn = serializer.apply(data).get(fieldName); + assertThat(jn).isNotNull(); + assertThat(jn.textValue()).isEqualTo(expectedValue); + } + + void testDeserialize(String fieldName, Class clazz, Function getter, + BiFunction, T> deserializer) throws IOException { + String fieldValue = UUID.randomUUID().toString(); + ObjectNode on = JsonNodeFactory.instance.objectNode().put(fieldName, fieldValue); + byte[] bytes = mapper.writeValueAsBytes(on); + T deser = deserializer.apply(bytes, clazz); + assertThat(getter.apply(deser)).isEqualTo(fieldValue); + } + + private JsonNode jacksonSerialize(Object data) { + try { + return mapper.readTree(mapper.writeValueAsBytes(data)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private JsonNode serdeSerialize(Object data) { + try { + return mapper.readTree(serde.serialize(data)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private T jacksonDeserialize(byte[] bytes, Class clazz) { + try { + return mapper.readValue(bytes, clazz); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private T 
serdeDeserialize(byte[] bytes, Class clazz) { + return serde.deserialize(bytes, clazz); + } +} diff --git a/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java b/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java new file mode 100644 index 000000000..7ce63baac --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java @@ -0,0 +1,40 @@ +package com.arangodb.serde; + +import com.arangodb.ArangoDatabase; +import com.arangodb.BaseJunit5; +import jakarta.json.Json; +import jakarta.json.JsonObject; +import jakarta.json.JsonString; +import jakarta.json.JsonValue; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; + +import static org.assertj.core.api.Assertions.assertThat; + +public class JsonBTypesTest extends BaseJunit5 { + + @BeforeAll + static void init() { + BaseJunit5.initDB(); + } + + @ParameterizedTest + @MethodSource("dbs") + void jsonNode(ArangoDatabase db) { + JsonObject doc = Json.createObjectBuilder() + .add("foo", "bar") + .build(); + JsonObject res = db.query("return @d", JsonObject.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.getString("foo")).isEqualTo("bar"); + JsonValue value = db.query("return @d.foo", JsonValue.class, Collections.singletonMap("d", doc)).next(); + assertThat(value) + .isInstanceOf(JsonString.class) + .extracting(v -> ((JsonString) v).getString()) + .isEqualTo("bar"); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java b/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java new file mode 100644 index 000000000..fd98e5e37 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java @@ -0,0 +1,102 @@ +package com.arangodb.serde; + +import com.arangodb.ContentType; +import com.arangodb.entity.BaseDocument; +import 
com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.arangodb.internal.serde.SerdeUtils; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.assertj.core.api.InstanceOfAssertFactories; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.*; + +import static org.assertj.core.api.Assertions.assertThat; + + +class SerdeTest { + + @ParameterizedTest + @EnumSource(ContentType.class) + void rawJsonSerde(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + ObjectNode node = JsonNodeFactory.instance.objectNode().put("foo", "bar"); + RawJson raw = RawJson.of(SerdeUtils.INSTANCE.writeJson(node)); + byte[] serialized = s.serialize(raw); + RawJson deserialized = s.deserialize(serialized, RawJson.class); + assertThat(deserialized).isEqualTo(raw); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void rawBytesSerde(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + ObjectNode node = JsonNodeFactory.instance.objectNode().put("foo", "bar"); + RawBytes raw = RawBytes.of(s.serialize(node)); + byte[] serialized = s.serializeUserData(raw); + RawBytes deserialized = s.deserializeUserData(serialized, RawBytes.class); + assertThat(deserialized).isEqualTo(raw); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void deserializeBaseDocumentWithNestedProperties(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + RawJson json = RawJson.of("{\"foo\":\"aaa\",\"properties\":{\"foo\":\"bbb\"}}"); + BaseDocument deserialized = s.deserialize(s.serialize(json), BaseDocument.class); + assertThat(deserialized.getAttribute("foo")).isEqualTo("aaa"); + assertThat(deserialized.getAttribute("properties")) + 
.isInstanceOf(Map.class) + .asInstanceOf(InstanceOfAssertFactories.MAP) + .containsEntry("foo", "bbb"); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void serializeBaseDocumentWithNestedProperties(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + BaseDocument doc = new BaseDocument(); + doc.addAttribute("foo", "aaa"); + doc.addAttribute("properties", Collections.singletonMap("foo", "bbb")); + byte[] ser = s.serialize(doc); + ObjectNode on = s.deserializeUserData(ser, ObjectNode.class); + assertThat(on.get("foo").textValue()).isEqualTo("aaa"); + assertThat(on.get("properties").get("foo").textValue()).isEqualTo("bbb"); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void deserializeNull(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + Void deser = s.deserialize((byte[]) null, Void.class); + assertThat(deser).isNull(); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void deserializeNullUserSerde(ContentType type) { + ArangoSerde s = ArangoSerdeProvider.of(type).create(); + Void deser = s.deserialize(null, Void.class); + assertThat(deser).isNull(); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void deserializeEmpty(ContentType type) { + InternalSerde s = new InternalSerdeProvider(type).create(); + Void deser = s.deserialize(new byte[0], Void.class); + assertThat(deser).isNull(); + } + + @ParameterizedTest + @EnumSource(ContentType.class) + void deserializeEmptyUserSerde(ContentType type) { + ArangoSerde s = ArangoSerdeProvider.of(type).create(); + Void deser = s.deserialize(new byte[0], Void.class); + assertThat(deser).isNull(); + } +} diff --git a/src/main/java/com/arangodb/util/MapBuilder.java b/test-functional/src/test/java/com/arangodb/util/MapBuilder.java similarity index 71% rename from src/main/java/com/arangodb/util/MapBuilder.java rename to test-functional/src/test/java/com/arangodb/util/MapBuilder.java index d296ef513..61d05865e 
100644 --- a/src/main/java/com/arangodb/util/MapBuilder.java +++ b/test-functional/src/test/java/com/arangodb/util/MapBuilder.java @@ -1,47 +1,46 @@ -/* - * DISCLAIMER - * - * Copyright 2016 ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.util; - -import java.util.LinkedHashMap; -import java.util.Map; - -/** - * @author Mark Vollmary - * - */ -public class MapBuilder { - - private final Map map; - - public MapBuilder() { - super(); - map = new LinkedHashMap(); - } - - public MapBuilder put(final String key, final Object value) { - map.put(key, value); - return this; - } - - public Map get() { - return map; - } -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.util; + +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * @author Mark Vollmary + */ +public class MapBuilder { + + private final Map map; + + public MapBuilder() { + super(); + map = new LinkedHashMap<>(); + } + + public MapBuilder put(final String key, final Object value) { + map.put(key, value); + return this; + } + + public Map get() { + return map; + } +} diff --git a/test-functional/src/test/java/com/arangodb/util/MapBuilderTest.java b/test-functional/src/test/java/com/arangodb/util/MapBuilderTest.java new file mode 100644 index 000000000..0496a685a --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/util/MapBuilderTest.java @@ -0,0 +1,43 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.util; + + +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + */ +class MapBuilderTest { + + @Test + void build() { + final Map map = new MapBuilder().put("foo", "bar").get(); + assertThat(map).hasSize(1); + assertThat(map.get("foo")).isNotNull(); + assertThat(map).containsEntry("foo", "bar"); + } +} diff --git a/test-functional/src/test/java/com/arangodb/util/SlowTest.java b/test-functional/src/test/java/com/arangodb/util/SlowTest.java new file mode 100644 index 000000000..cc74f7f21 --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/util/SlowTest.java @@ -0,0 +1,14 @@ +package com.arangodb.util; + +import org.junit.jupiter.api.condition.EnabledIfSystemProperty; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Target({ElementType.TYPE, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@EnabledIfSystemProperty(named = "enableSlowTests", matches = "true") +public @interface SlowTest { +} diff --git a/test-functional/src/test/java/com/arangodb/util/TestUtils.java b/test-functional/src/test/java/com/arangodb/util/TestUtils.java new file mode 100644 index 000000000..978fe29ee --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/util/TestUtils.java @@ -0,0 +1,110 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package com.arangodb.util; + + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.UUID; + +/** + * @author Michele Rastelli + */ +public final class TestUtils { + public static final String TEST_DB = "java_driver_test_db"; + private static final String[] allChars = TestUtils.generateAllInputChars(); + private static final Random r = new Random(); + + private TestUtils() { + } + + /** + * Parses {@param version} and checks whether it is greater or equal to <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. + */ + public static boolean isAtLeastVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) >= 0; + } + + /** + * Parses {@param version} and checks whether it is less than <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. 
+ */ + public static boolean isLessThanVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) < 0; + } + + private static int compareVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + String[] parts = version.split("-")[0].split("\\."); + + int major = Integer.parseInt(parts[0]); + int minor = Integer.parseInt(parts[1]); + int patch = Integer.parseInt(parts[2]); + + int majorComparison = Integer.compare(major, otherMajor); + if (majorComparison != 0) { + return majorComparison; + } + + int minorComparison = Integer.compare(minor, otherMinor); + if (minorComparison != 0) { + return minorComparison; + } + + return Integer.compare(patch, otherPatch); + } + + private static String[] generateAllInputChars() { + List list = new ArrayList<>(); + for (int codePoint = 0; codePoint < Character.MAX_CODE_POINT + 1; codePoint++) { + String s = new String(Character.toChars(codePoint)); + if (codePoint == 47 || // '/' + codePoint == 58 || // ':' + Character.isISOControl(codePoint) || + Character.isLowSurrogate(s.charAt(0)) || + (Character.isHighSurrogate(s.charAt(0)) && s.length() == 1)) { + continue; + } + list.add(s); + } + return list.toArray(new String[0]); + } + + public static String generateRandomName(boolean extendedNames, int length) { + if (extendedNames) { + int max = allChars.length; + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + String allChar = allChars[r.nextInt(max)]; + sb.append(allChar); + } + return UnicodeUtils.normalize(sb.toString()); + } else { + return UUID.randomUUID().toString(); + } + } + +} diff --git a/test-functional/src/test/resources/META-INF/native-image/native-image.properties b/test-functional/src/test/resources/META-INF/native-image/native-image.properties new file mode 100644 index 000000000..0f53a7a15 --- /dev/null +++ 
b/test-functional/src/test/resources/META-INF/native-image/native-image.properties @@ -0,0 +1,46 @@ +Args=\ + -Dio.netty.noUnsafe=true \ + -Dio.netty.leakDetection.level=DISABLED \ + -H:+AllowDeprecatedBuilderClassesOnImageClasspath \ + -H:ResourceConfigurationResources=${.}/resource-config.json \ + -H:ReflectionConfigurationResources=${.}/reflect-config.json \ + -H:SerializationConfigurationResources=${.}/serialization-config.json \ + --initialize-at-build-time=\ + org.slf4j,\ + org.junit.platform.engine.TestTag,\ + io.netty \ + --initialize-at-run-time=\ + io.netty.buffer.PooledByteBufAllocator,\ + io.netty.buffer.ByteBufAllocator,\ + io.netty.buffer.ByteBufUtil,\ + io.netty.buffer.AbstractReferenceCountedByteBuf,\ + io.netty.handler.ssl.JdkSslServerContext,\ + io.netty.handler.codec.compression.BrotliDecoder,\ + io.netty.handler.codec.compression.ZstdConstants,\ + io.netty.handler.codec.http2.Http2CodecUtil,\ + io.netty.handler.codec.http2.Http2ClientUpgradeCodec,\ + io.netty.handler.codec.http2.Http2ConnectionHandler,\ + io.netty.handler.codec.http2.DefaultHttp2FrameWriter,\ + io.netty.handler.codec.http.HttpObjectEncoder,\ + io.netty.handler.codec.http.websocketx.WebSocket00FrameEncoder,\ + io.netty.handler.codec.http.websocketx.extensions.compression.DeflateDecoder,\ + io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler,\ + io.netty.handler.codec.http2.Http2ServerUpgradeCodec,\ + io.netty.handler.pcap.PcapWriteHandler$WildcardAddressHolder,\ + io.netty.util.AbstractReferenceCounted,\ + io.netty.util.concurrent.GlobalEventExecutor,\ + io.netty.util.concurrent.ImmediateEventExecutor,\ + io.netty.util.concurrent.ScheduledFutureTask,\ + io.netty.util.internal.ThreadLocalRandom,\ + io.netty.util.NetUtilSubstitutions$NetUtilLocalhost4LazyHolder,\ + io.netty.util.NetUtilSubstitutions$NetUtilLocalhost6LazyHolder,\ + io.netty.util.NetUtilSubstitutions$NetUtilLocalhostLazyHolder,\ + io.netty.util.NetUtilSubstitutions$NetUtilNetworkInterfacesLazyHolder,\ + 
io.netty.handler.ssl.util.ThreadLocalInsecureRandom,\ + io.netty.resolver.dns.DefaultDnsServerAddressStreamProvider,\ + io.netty.resolver.dns.DnsServerAddressStreamProviders$DefaultProviderHolder,\ + io.netty.resolver.dns.DnsNameResolver,\ + io.netty.resolver.HostsFileEntriesResolver,\ + io.netty.resolver.dns.ResolvConf$ResolvConfLazy,\ + io.netty.resolver.dns.DefaultDnsServerAddressStreamProvider,\ + io.vertx.core.buffer.impl.VertxByteBufAllocator diff --git a/test-functional/src/test/resources/META-INF/native-image/reflect-config.json b/test-functional/src/test/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 000000000..cc3e412db --- /dev/null +++ b/test-functional/src/test/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,314 @@ +[ + { + "name": "org.junit.jupiter.engine.extension.TimeoutInvocationFactory$SingleThreadExecutorResource", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "org.junit.jupiter.engine.extension.TimeoutInvocationFactory$SingleThreadExecutorResource", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.ArangoCollectionTest$TestUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionTest$TestUpdateEntitySerializeNullFalse", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoDatabaseTest$TransactionTestEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.document.AqlQueryWithSpecialReturnTypesExampleTest$Gender", + "allDeclaredFields": true, + "allDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.document.TestEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.graph.AQLActorsAndMoviesExampleTest$Actor", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.graph.AQLActorsAndMoviesExampleTest$Movie", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.graph.Circle", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.graph.CircleEdge", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.example.graph.ShortestPathInAQLExampleTest$Pair", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomSerdeTest$Person", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomSerdeTest$PersonDeserializer", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomSerdeTest$PersonSerializer", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": 
"com.arangodb.serde.CustomSerdeAsyncTest$Person", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomSerdeAsyncTest$PersonDeserializer", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomSerdeAsyncTest$PersonSerializer", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.serde.CustomTypeHintTest$Animal", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomTypeHintTest$Gorilla", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.CustomTypeHintTest$Zoo", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "java.util.HashSet", + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.arangodb.mapping.annotations.AnnotatedEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.fasterxml.jackson.databind.deser.std.DateDeserializers$SqlDateDeserializer", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "com.fasterxml.jackson.databind.ser.std.SqlDateSerializer", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + }, + { + "name": "com.arangodb.ArangoCollectionTest$Animal", + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionTest$AnnotatedEntity", + "allDeclaredFields": true, + 
"allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionTest$Cat", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionTest$Dog", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.JacksonInterferenceTest", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.JacksonInterferenceTest$FooField", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.serde.JacksonInterferenceTest$FooProp", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$TestUpdateEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$TestUpdateEntitySerializeNullFalse", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoDatabaseAsyncTest$TransactionTestEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$Animal", + 
"allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$AnnotatedEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$Cat", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.ArangoCollectionAsyncTest$Dog", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredConstructors": true, + "allDeclaredClasses": true + }, + { + "name": "com.arangodb.RequestContextTest$Person", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + } +] diff --git a/test-functional/src/test/resources/META-INF/native-image/resource-config.json b/test-functional/src/test/resources/META-INF/native-image/resource-config.json new file mode 100644 index 000000000..9d96e052d --- /dev/null +++ b/test-functional/src/test/resources/META-INF/native-image/resource-config.json @@ -0,0 +1,25 @@ +{ + "resources": { + "includes": [ + { + "pattern": "\\Qarangodb.properties\\E" + }, + { + "pattern": "\\Qarangodb-ssl.properties\\E" + }, + { + "pattern": "\\Qarangodb-bad.properties\\E" + }, + { + "pattern": "\\Qarangodb-bad2.properties\\E" + }, + { + "pattern": "\\Qlogback-test.xml\\E" + }, + { + "pattern": "\\Qexample.truststore\\E" + } + ] + }, + "bundles": [] +} diff --git a/test-functional/src/test/resources/META-INF/native-image/serialization-config.json b/test-functional/src/test/resources/META-INF/native-image/serialization-config.json new file mode 100644 index 000000000..08c51a059 --- /dev/null +++ b/test-functional/src/test/resources/META-INF/native-image/serialization-config.json @@ -0,0 +1,32 @@ +[ + { + "name": "java.lang.Exception" 
+ }, + { + "name": "java.lang.RuntimeException" + }, + { + "name": "java.lang.StackTraceElement" + }, + { + "name": "java.lang.StackTraceElement[]" + }, + { + "name": "java.lang.String" + }, + { + "name":"java.lang.Integer" + }, + { + "name":"java.lang.Number" + }, + { + "name": "java.lang.Throwable" + }, + { + "name": "java.util.Collections$EmptyList" + }, + { + "name": "java.util.Collections$SingletonList" + } +] diff --git a/test-functional/src/test/resources/allure.properties b/test-functional/src/test/resources/allure.properties new file mode 100644 index 000000000..80b02dde9 --- /dev/null +++ b/test-functional/src/test/resources/allure.properties @@ -0,0 +1 @@ +allure.results.directory=target/allure-results diff --git a/test-functional/src/test/resources/arangodb-bad.properties b/test-functional/src/test/resources/arangodb-bad.properties new file mode 100644 index 000000000..8a45d9ee0 --- /dev/null +++ b/test-functional/src/test/resources/arangodb-bad.properties @@ -0,0 +1 @@ +arangodb.hosts=172.28.0.1:8529,172.28.0.1:fail \ No newline at end of file diff --git a/test-functional/src/test/resources/arangodb-ssl.properties b/test-functional/src/test/resources/arangodb-ssl.properties new file mode 100644 index 000000000..eb0c74f48 --- /dev/null +++ b/test-functional/src/test/resources/arangodb-ssl.properties @@ -0,0 +1,7 @@ +arangodb.hosts=172.28.0.1:8529 +arangodb.password=test +arangodb.useSsl=true 
+arangodb.sslCertValue=MIIDezCCAmOgAwIBAgIEeDCzXzANBgkqhkiG9w0BAQsFADBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMjAxMTAxMTg1MTE5WhcNMzAxMDMwMTg1MTE5WjBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1WiDnd4+uCmMG539ZNZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMALX9euSseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7NzmvdogNXoJQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiRdJVPwUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf2MGO64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzbf8KnRY4PAgMBAAGjITAfMB0GA1UdDgQWBBTBrv9Awynt3C5IbaCNyOW5v4DNkTANBgkqhkiG9w0BAQsFAAOCAQEAIm9rPvDkYpmzpSIhR3VXG9Y71gxRDrqkEeLsMoEyqGnw/zx1bDCNeGg2PncLlW6zTIipEBooixIE9U7KxHgZxBy0Et6EEWvIUmnr6F4F+dbTD050GHlcZ7eOeqYTPYeQC502G1Fo4tdNi4lDP9L9XZpf7Q1QimRH2qaLS03ZFZa2tY7ah/RQqZL8Dkxx8/zc25sgTHVpxoK853glBVBs/ENMiyGJWmAXQayewY3EPt/9wGwV4KmU3dPDleQeXSUGPUISeQxFjy+jCw21pYviWVJTNBA9l5ny3GhEmcnOT/gQHCvVRLyGLMbaMZ4JrPwb+aAtBgrgeiK4xeSMMvrbhw== +arangodb.sslAlgorithm=SunX509 +arangodb.sslProtocol=TLS +arangodb.verifyHost=false diff --git a/test-functional/src/test/resources/arangodb.properties b/test-functional/src/test/resources/arangodb.properties new file mode 100644 index 000000000..b671d4155 --- /dev/null +++ b/test-functional/src/test/resources/arangodb.properties @@ -0,0 +1,3 @@ +arangodb.hosts=172.28.0.1:8529 +arangodb.password=test +arangodb.acquireHostList=true diff --git a/test-functional/src/test/resources/example.truststore b/test-functional/src/test/resources/example.truststore new file mode 100644 index 000000000..e683a48b8 Binary files /dev/null and b/test-functional/src/test/resources/example.truststore differ diff --git a/test-functional/src/test/resources/simplelogger.properties 
b/test-functional/src/test/resources/simplelogger.properties new file mode 100644 index 000000000..a2a4ce6d5 --- /dev/null +++ b/test-functional/src/test/resources/simplelogger.properties @@ -0,0 +1,14 @@ +org.slf4j.simpleLogger.logFile=System.out +org.slf4j.simpleLogger.showDateTime=true +org.slf4j.simpleLogger.dateTimeFormat=HH:mm:ss.SSS +org.slf4j.simpleLogger.showThreadName=true +org.slf4j.simpleLogger.showLogName=true +org.slf4j.simpleLogger.showShortLogName=false + + +org.slf4j.simpleLogger.defaultLogLevel=info +#org.slf4j.simpleLogger.log.com.arangodb.internal.serde.JacksonUtils=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.net.Communication=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.serde.InternalSerdeImpl=debug +#org.slf4j.simpleLogger.log.io.netty.handler.logging.LoggingHandler=debug +#org.slf4j.simpleLogger.log.io.netty.handler.codec.http2.Http2FrameLogger=debug diff --git a/test-non-functional/pom.xml b/test-non-functional/pom.xml new file mode 100644 index 000000000..b28d63c88 --- /dev/null +++ b/test-non-functional/pom.xml @@ -0,0 +1,108 @@ + + + 4.0.0 + + + ../test-parent + com.arangodb + test-parent + 7.22.0 + + + test-non-functional + + + 17 + 17 + 17 + + + + + + com.arangodb + jsonb-serde + compile + + + org.eclipse + yasson + test + + + com.tngtech.archunit + archunit-junit5 + test + + + org.graalvm.sdk + graal-sdk + ${graalvm.version} + test + + + org.graalvm.truffle + truffle-api + ${graalvm.version} + test + + + org.graalvm.polyglot + js + ${graalvm.version} + pom + test + + + io.smallrye.config + smallrye-config-core + 3.13.1 + test + + + javax.annotation + javax.annotation-api + 1.3.2 + test + + + + + + shaded + + + shaded + true + + + + + + com.google.code.maven-replacer-plugin + replacer + + + + com.fasterxml.jackson.databind.JsonNode + com.arangodb.shaded.fasterxml.jackson.databind.JsonNode + + + com.fasterxml.jackson.databind.ObjectNode + com.arangodb.shaded.fasterxml.jackson.databind.ObjectNode + + + 
com.fasterxml.jackson.databind.node.JsonNodeFactory + com.arangodb.shaded.fasterxml.jackson.databind.node.JsonNodeFactory + + + + + + + + + + \ No newline at end of file diff --git a/test-non-functional/src/test/java/CommunicationTest.java b/test-non-functional/src/test/java/CommunicationTest.java new file mode 100644 index 000000000..158a1b3d8 --- /dev/null +++ b/test-non-functional/src/test/java/CommunicationTest.java @@ -0,0 +1,75 @@ +import com.arangodb.*; +import com.arangodb.config.ArangoConfigProperties; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import util.TestUtils; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class CommunicationTest { + + @ParameterizedTest + @EnumSource(Protocol.class) + @Timeout(5) + void disconnectAsync(Protocol protocol) throws InterruptedException, ExecutionException { + assumeTrue(!Protocol.VST.equals(protocol)); + + ArangoDBAsync arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(protocol) + .serde(TestUtils.createSerde(protocol)) + .build() + .async(); + arangoDB.getVersion().get(); + + CompletableFuture> result = arangoDB.db().query("return sleep(1)", null); + Thread.sleep(500); + arangoDB.shutdown(); + Throwable thrown = catchThrowable(result::get).getCause(); + assertThat(thrown) + .isNotNull() + .isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()) + .isInstanceOf(IOException.class) + .hasMessageContaining("closed"); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + @Timeout(5) + void disconnect(Protocol protocol) { + assumeTrue(!Protocol.VST.equals(protocol)); + + ArangoDB 
arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(protocol) + .serde(TestUtils.createSerde(protocol)) + .build(); + arangoDB.getVersion(); + + new Thread(() -> { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + arangoDB.shutdown(); + }).start(); + + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return sleep(1)", null)); + assertThat(thrown) + .isNotNull() + .isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()) + .isInstanceOf(IOException.class) + .hasMessageContaining("closed"); + } + +} diff --git a/test-non-functional/src/test/java/ConfigurationTest.java b/test-non-functional/src/test/java/ConfigurationTest.java new file mode 100644 index 000000000..b004c9b38 --- /dev/null +++ b/test-non-functional/src/test/java/ConfigurationTest.java @@ -0,0 +1,49 @@ +import com.arangodb.ArangoDB; +import com.arangodb.ContentType; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.serde.jackson.JacksonSerde; +import org.junit.jupiter.api.Test; + +import java.util.Properties; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConfigurationTest { + + @Test + void fallbackHost() { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .serde(JacksonSerde.of(ContentType.JSON)) + .host("not-accessible", 8529) + .host("172.28.0.1", 8529) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @Test + void loadPropertiesWithPrefix() { + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile("arangodb-with-prefix.properties", "adb")) + .serde(JacksonSerde.of(ContentType.JSON)) + .build(); + adb.getVersion(); + adb.shutdown(); + } + + @Test + void loadConfigFromPropertiesWithPrefix() { + Properties props = new Properties(); 
+ props.setProperty("adb.hosts", "172.28.0.1:8529"); + props.setProperty("adb.password", "test"); + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromProperties(props, "adb")) + .serde(JacksonSerde.of(ContentType.JSON)) + .build(); + adb.getVersion(); + adb.shutdown(); + } + +} diff --git a/test-non-functional/src/test/java/arch/ArchUtils.java b/test-non-functional/src/test/java/arch/ArchUtils.java new file mode 100644 index 000000000..5c80edd0f --- /dev/null +++ b/test-non-functional/src/test/java/arch/ArchUtils.java @@ -0,0 +1,59 @@ +package arch; + +import com.arangodb.arch.NoRawTypesInspection; +import com.tngtech.archunit.base.ChainableFunction; +import com.tngtech.archunit.base.DescribedPredicate; +import com.tngtech.archunit.core.domain.JavaClass; +import com.tngtech.archunit.core.domain.JavaType; +import com.tngtech.archunit.core.domain.properties.HasReturnType; +import com.tngtech.archunit.core.domain.properties.HasType; + + +class ArchUtils { + + static class JavaTypeExt { + static DescribedPredicate rawTypes(DescribedPredicate predicate) { + return new DescribedPredicate<>("raw types " + predicate.getDescription()) { + @Override + public boolean test(JavaType t) { + if (t.toErasure().isAnnotatedWith(NoRawTypesInspection.class)) { + return predicate.test(t.toErasure()); + } else { + return t.getAllInvolvedRawTypes().stream().allMatch(predicate); + } + } + }; + } + } + + static class HasReturnTypeExt { + private static final ChainableFunction GET_RETURN_TYPE = new ChainableFunction<>() { + @Override + public JavaType apply(HasReturnType input) { + return input.getReturnType(); + } + }; + + static DescribedPredicate returnType(DescribedPredicate predicate) { + return predicate.onResultOf(GET_RETURN_TYPE).as("return type %s", predicate.getDescription()); + } + } + + static class HasTypeExt { + private static final ChainableFunction GET_TYPE = new ChainableFunction<>() { + @Override + public JavaType apply(HasType input) { + 
return input.getType(); + } + }; + + static DescribedPredicate type(DescribedPredicate predicate) { + return GET_TYPE.is(predicate).as("type " + predicate.getDescription()); + } + + static DescribedPredicate rawTypes(DescribedPredicate predicate) { + return type(JavaTypeExt.rawTypes(predicate)); + } + } + +} diff --git a/test-non-functional/src/test/java/arch/InternalsTest.java b/test-non-functional/src/test/java/arch/InternalsTest.java new file mode 100644 index 000000000..e46d0ab29 --- /dev/null +++ b/test-non-functional/src/test/java/arch/InternalsTest.java @@ -0,0 +1,173 @@ +package arch; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.arch.UsedInApi; +import com.tngtech.archunit.base.DescribedPredicate; +import com.tngtech.archunit.core.domain.*; +import com.tngtech.archunit.core.importer.ImportOption; +import com.tngtech.archunit.junit.AnalyzeClasses; +import com.tngtech.archunit.junit.ArchTest; +import com.tngtech.archunit.lang.ArchRule; + +import java.util.function.Function; +import java.util.stream.Stream; + +import static arch.ArchUtils.*; +import static com.tngtech.archunit.base.DescribedPredicate.*; +import static com.tngtech.archunit.core.domain.JavaClass.Predicates.assignableTo; +import static com.tngtech.archunit.core.domain.JavaClass.Predicates.resideInAPackage; +import static com.tngtech.archunit.core.domain.properties.CanBeAnnotated.Predicates.annotatedWith; +import static com.tngtech.archunit.lang.conditions.ArchConditions.be; +import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.*; + +@AnalyzeClasses(packages = "com.arangodb..", importOptions = {ImportOption.DoNotIncludeTests.class}) +public class InternalsTest { + + /** + * Elements of public API are from all packages under {@link com.arangodb} except: + * - internal packages + * - dependencies packages + */ + private static final DescribedPredicate packageFilter = + and( + not(JavaClass.Predicates.resideInAnyPackage( + "..internal..", + "com.arangodb.jackson..", + 
"com.arangodb.velocypack..", + "com.arangodb.shaded..") + ) + ); + + /** + * Tests whether the type and all its raw generic types do not extend or implement internal classes + */ + private static final DescribedPredicate typePredicate = + JavaTypeExt.rawTypes(not(assignableTo(resideInAPackage("..internal..")))); + + /** + * Superclasses of types used in public API must either: + * - not reside in internal packages, or + * - be annotated with {@link UsedInApi} + */ + private static final DescribedPredicate superclassesPredicate = + JavaTypeExt.rawTypes(superclasses(or( + not(resideInAPackage("..internal..")), + annotatedWith(UsedInApi.class) + ))); + + /** + * Classes in the public API must either: + * - not extend or implement internal classes, or + * - be annotated with {@link UnstableApi} and fulfil {@link #superclassesPredicate} + */ + private static final DescribedPredicate classPredicate = + or( + typePredicate, + and( + annotatedWith(UnstableApi.class), + superclassesPredicate + ) + ); + + /** + * Fields in the public API must either: + * - have type that not extends or implement internal classes, or + * - be annotated with {@link UnstableApi} and have type that fulfils {@link #superclassesPredicate} + */ + private static final DescribedPredicate fieldPredicate = + or( + HasTypeExt.type(typePredicate), + and( + annotatedWith(UnstableApi.class), + HasTypeExt.type(superclassesPredicate) + ) + ); + + /** + * Methods in the public API must either: + * - have return type that not extends or implement internal classes, or + * - be annotated with {@link UnstableApi} and have return type that fulfils {@link #superclassesPredicate} + */ + private static final DescribedPredicate methodReturnTypePredicate = + or( + HasReturnTypeExt.returnType(typePredicate), + and( + annotatedWith(UnstableApi.class), + HasReturnTypeExt.returnType(superclassesPredicate) + ) + ); + + /** + * Parameters of methods in the public API must either: + * - have type that not resides in internal 
classes, or + * - be annotated with {@link UnstableApi} and have type annotated with {@link UsedInApi} + */ + private static final DescribedPredicate paramPredicate = haveParams(or( + HasTypeExt.rawTypes(not(resideInAPackage("..internal.."))), + and( + annotatedWith(UnstableApi.class), + HasTypeExt.rawTypes(or( + not(resideInAPackage("..internal..")), + annotatedWith(UsedInApi.class) + )) + ) + )); + + @ArchTest + @SuppressWarnings("unused") + public static final ArchRule noInternalsInApiFields = fields() + .that().arePublic() + .or().areProtected() + .and().areDeclaredInClassesThat(packageFilter) + .should(be(fieldPredicate)); + + @ArchTest + @SuppressWarnings("unused") + public static final ArchRule noInternalsInApiClasses = classes() + .that().arePublic() + .or().areProtected() + .and(packageFilter) + .should(be(classPredicate)); + + @ArchTest + @SuppressWarnings("unused") + public static final ArchRule noInternalsInApiMethods = methods() + .that().arePublic() + .or().areProtected() + .and().areDeclaredInClassesThat(packageFilter) + .should(be(methodReturnTypePredicate)) + .andShould(be(paramPredicate)); + + @ArchTest + @SuppressWarnings("unused") + public static final ArchRule noInternalsInApiConstructors = constructors() + .that().arePublic() + .or().areProtected() + .and().areDeclaredInClassesThat(packageFilter) + .should(be(paramPredicate)); + + private static DescribedPredicate superclasses(DescribedPredicate predicate) { + return new DescribedPredicate<>("superclasses " + predicate.getDescription()) { + @Override + public boolean test(JavaClass clazz) { + return Stream.of( + Stream.of(clazz), + clazz.getAllRawSuperclasses().stream(), + clazz.getAllRawInterfaces().stream() + ) + .flatMap(Function.identity()) + .allMatch(predicate); + } + }; + } + + private static DescribedPredicate haveParams(DescribedPredicate predicate) { + return new DescribedPredicate<>("have params " + predicate.getDescription()) { + @Override + public boolean test(JavaCodeUnit method) 
{ + return method.getParameters().stream().allMatch(predicate); + } + }; + } + +} diff --git a/test-non-functional/src/test/java/arch/SerdeArchTest.java b/test-non-functional/src/test/java/arch/SerdeArchTest.java new file mode 100644 index 000000000..05f799dd4 --- /dev/null +++ b/test-non-functional/src/test/java/arch/SerdeArchTest.java @@ -0,0 +1,36 @@ +package arch; + +import com.tngtech.archunit.core.importer.ImportOption.DoNotIncludeTests; +import com.tngtech.archunit.junit.AnalyzeClasses; +import com.tngtech.archunit.junit.ArchTest; +import com.tngtech.archunit.lang.ArchRule; + +import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.noClasses; + + +@AnalyzeClasses(packages = "com.arangodb..", importOptions = {DoNotIncludeTests.class}) +public class SerdeArchTest { + + @ArchTest + public static final ArchRule noDependencyOnJsonbSerde = noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackage("com.arangodb.serde.jsonb..") + .should().dependOnClassesThat() + .resideInAPackage("com.arangodb.serde.jsonb.."); + + @ArchTest + public static final ArchRule noDependencyOnJacksonDataformatVelocypack = noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackage("com.arangodb.jackson.dataformat.velocypack..").and() + .resideOutsideOfPackage("com.arangodb.serde.jackson..") + .should().dependOnClassesThat() + .resideInAPackage("com.arangodb.jackson.dataformat.velocypack.."); + + @ArchTest + public static final ArchRule noDependencyOnJacksonSerde = noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackage("com.arangodb.serde.jackson..") + .should().dependOnClassesThat() + .resideInAPackage("com.arangodb.serde.jackson.."); + +} diff --git a/test-non-functional/src/test/java/arch/ShadedArchTest.java b/test-non-functional/src/test/java/arch/ShadedArchTest.java new file mode 100644 index 000000000..ec065896b --- /dev/null +++ 
b/test-non-functional/src/test/java/arch/ShadedArchTest.java @@ -0,0 +1,87 @@ +package arch; + +import com.tngtech.archunit.core.domain.JavaClasses; +import com.tngtech.archunit.core.importer.ClassFileImporter; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static com.tngtech.archunit.core.importer.ImportOption.Predefined.DO_NOT_INCLUDE_TESTS; +import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.noClasses; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +public class ShadedArchTest { + private final JavaClasses importedClasses = new ClassFileImporter() + .withImportOption(DO_NOT_INCLUDE_TESTS) + .importPackages("com.arangodb.."); + + private final boolean shaded = isShaded(); + + private static boolean isShaded() { + boolean shaded; + try { + Class.forName("com.arangodb.shaded.fasterxml.jackson.databind.JsonNode"); + shaded = true; + } catch (ClassNotFoundException e) { + shaded = false; + } + return shaded; + } + + @BeforeEach + void checkShaded() { + assumeTrue(shaded, "not shaded driver"); + } + + @Test + public void nettyRelocation() { + noClasses().that() + .resideInAPackage("com.arangodb..") + .should().dependOnClassesThat() + .resideInAPackage("io.netty..") + .check(importedClasses); + } + + @Test + public void vertxRelocation() { + noClasses().that() + .resideInAPackage("com.arangodb..") + .should().dependOnClassesThat() + .resideInAPackage("io.vertx..") + .check(importedClasses); + } + + @Test + public void jacksonRelocation() { + noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackage("com.arangodb.jackson.dataformat.velocypack..").and() + .resideOutsideOfPackage("com.arangodb.serde.jackson..") + .should().dependOnClassesThat() + .resideInAPackage("com.fasterxml.jackson..") + .check(importedClasses); + } + + @Test + public void noJacksonDependency() { + noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackages( + 
"com.arangodb.jackson.dataformat.velocypack..", + "com.arangodb.serde.jackson..") + .should().dependOnClassesThat() + .resideInAPackage("com.fasterxml.jackson..") + .check(importedClasses); + } + + @Test + public void noJacksonDataformatVelocypackDependency() { + noClasses().that() + .resideInAPackage("com.arangodb..").and() + .resideOutsideOfPackage("com.arangodb.jackson.dataformat.velocypack..") + .should().dependOnClassesThat() + .resideInAPackage("com.arangodb.jackson.dataformat.velocypack..") + .check(importedClasses); + } + +} diff --git a/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java b/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java new file mode 100644 index 000000000..a3f5200a2 --- /dev/null +++ b/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java @@ -0,0 +1,113 @@ +package concurrency; + +import com.arangodb.*; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.internal.net.ConnectionPoolImpl; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import util.TestUtils; + +import java.time.Duration; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class ConnectionLoadBalanceTest { + private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionLoadBalanceTest.class); + + public static Stream configs() { + return Stream.of( + // FIXME: DE-1017 + // new Config(Protocol.VST, 1), + // new Config(Protocol.VST, 2), + new Config(Protocol.HTTP_JSON, 10), + new Config(Protocol.HTTP_JSON, 20), + new Config(Protocol.HTTP2_JSON, 1), + new Config(Protocol.HTTP2_JSON, 2) + ).map(Arguments::of); + } + + // Test the requests load balancing 
across different connections, when all the slots except 1 are busy + @MethodSource("configs") + @ParameterizedTest + void loadBalanceToAvailableSlots(Config cfg) throws InterruptedException { + doTestLoadBalance(cfg, 1); + } + + // Test the requests load balancing across different connections, when all the slots are busy + @MethodSource("configs") + @ParameterizedTest + void loadBalanceAllBusy(Config cfg) throws InterruptedException { + doTestLoadBalance(cfg, 2); + } + + void doTestLoadBalance(Config cfg, int sleepCycles) throws InterruptedException { + int longTasksCount = cfg.maxStreams() * cfg.maxConnections * sleepCycles - 1; + int shortTasksCount = 10; + long sleepDuration = 2; + + ArangoDatabaseAsync db = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(cfg.protocol) + .serde(TestUtils.createSerde(cfg.protocol)) + .maxConnections(cfg.maxConnections) + .build().async().db(); + + LOGGER.debug("starting..."); + + CompletableFuture longRunningTasks = CompletableFuture.allOf( + IntStream.range(0, longTasksCount) + .mapToObj(__ -> + db.query("RETURN SLEEP(@duration)", Void.class, Map.of("duration", sleepDuration))) + .toArray(CompletableFuture[]::new) + ); + + Thread.sleep(100); + + CompletableFuture shortRunningTasks = CompletableFuture.allOf( + IntStream.range(0, shortTasksCount) + .mapToObj(__ -> db.getVersion()) + .toArray(CompletableFuture[]::new) + ); + + LOGGER.debug("awaiting..."); + + await() + .timeout(Duration.ofSeconds(sleepDuration * sleepCycles - 1L)) + .until(shortRunningTasks::isDone); + + LOGGER.debug("completed shortRunningTasks"); + + // join exceptional completions + shortRunningTasks.join(); + + await() + .timeout(Duration.ofSeconds(sleepDuration * sleepCycles + 2L)) + .until(longRunningTasks::isDone); + + LOGGER.debug("completed longRunningTasks"); + + // join exceptional completions + longRunningTasks.join(); + + db.arango().shutdown(); + } + + private record Config( + Protocol protocol, + int 
maxConnections + ) { + int maxStreams() { + return switch (protocol) { + case HTTP_JSON, HTTP_VPACK -> ConnectionPoolImpl.HTTP1_SLOTS; + default -> ConnectionPoolImpl.HTTP2_SLOTS; + }; + } + } +} diff --git a/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java b/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java new file mode 100644 index 000000000..bf9641e0c --- /dev/null +++ b/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java @@ -0,0 +1,63 @@ +package concurrency; + +import com.arangodb.config.HostDescription; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.*; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.*; + +public class ConnectionPoolConcurrencyTest { + + private final ArangoConfig cfg = new ArangoConfig(); + + { + cfg.setMaxConnections(10_000); + } + + private final ConnectionFactory cf = (config, host, pool) -> new Connection() { + @Override + public void setJwt(String jwt) { + } + + @Override + public CompletableFuture executeAsync(InternalRequest request) { + throw new UnsupportedOperationException(); + } + + @Override + public void release() { + } + + @Override + public void close() { + } + }; + + @Test + void foo() throws InterruptedException, ExecutionException, IOException { + ConnectionPool cp = new ConnectionPoolImpl(HostDescription.parse("127.0.0.1:8529"), cfg, cf); + ExecutorService es = Executors.newCachedThreadPool(); + + List> futures = es.invokeAll(Collections.nCopies(8, (Callable) () -> { + for (int i = 0; i < 10_000; i++) { + cp.createConnection(); + cp.connection(); + cp.setJwt("foo"); + } + return null; + })); + + for (Future future : futures) { + future.get(); + } + cp.close(); + es.shutdown(); + } + +} diff --git 
a/test-non-functional/src/test/java/example/ExampleBase.java b/test-non-functional/src/test/java/example/ExampleBase.java new file mode 100644 index 000000000..c4bb33763 --- /dev/null +++ b/test-non-functional/src/test/java/example/ExampleBase.java @@ -0,0 +1,65 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example; + +import com.arangodb.ArangoCollection; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDatabase; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import util.TestUtils; + +/** + * @author Mark Vollmary + */ +public class ExampleBase { + + protected static final String COLLECTION_NAME = "json_example_collection"; + private static final String DB_NAME = "json_example_db"; + protected static ArangoDatabase db; + protected static ArangoCollection collection; + private static ArangoDB arangoDB; + + @BeforeAll + static void setUp() { + ArangoConfigProperties config = ArangoConfigProperties.fromFile(); + arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .serde(TestUtils.createSerde(config.getProtocol().orElse(Protocol.HTTP2_JSON))) + .build(); + String dbName = DB_NAME; + if (arangoDB.db(dbName).exists()) + arangoDB.db(dbName).drop(); + 
arangoDB.createDatabase(dbName); + db = arangoDB.db(dbName); + db.createCollection(COLLECTION_NAME); + collection = db.collection(COLLECTION_NAME); + } + + @AfterAll + static void tearDown() { + db.drop(); + arangoDB.shutdown(); + } + +} diff --git a/test-non-functional/src/test/java/example/FirstProject.java b/test-non-functional/src/test/java/example/FirstProject.java new file mode 100644 index 000000000..2ce74358c --- /dev/null +++ b/test-non-functional/src/test/java/example/FirstProject.java @@ -0,0 +1,136 @@ +package example; + +import com.arangodb.ArangoCollection; +import com.arangodb.ArangoCursor; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBException; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.CollectionEntity; +import com.fasterxml.jackson.databind.JsonNode; + +import java.util.Collections; +import java.util.Map; +import java.util.UUID; + +public class FirstProject { + + public static void main(final String[] args) { + final ArangoDB arangoDB = new ArangoDB.Builder().loadProperties(ArangoConfigProperties.fromFile()).user("root").build(); + + // create database + final String dbName = "mydb"; + try { + arangoDB.createDatabase(dbName); + System.out.println("Database created: " + dbName); + } catch (final ArangoDBException e) { + System.err.println("Failed to create database: " + dbName + "; " + e.getMessage()); + } + + // create collection + final String collectionName = "firstCollection"; + try { + final CollectionEntity myArangoCollection = arangoDB.db(dbName).createCollection(collectionName); + System.out.println("Collection created: " + myArangoCollection.getName()); + } catch (final ArangoDBException e) { + System.err.println("Failed to create collection: " + collectionName + "; " + e.getMessage()); + } + + // creating a document + final BaseDocument myObject = new BaseDocument(UUID.randomUUID().toString()); + myObject.setKey("myKey"); + 
myObject.addAttribute("a", "Foo"); + myObject.addAttribute("b", 42); + try { + arangoDB.db(dbName).collection(collectionName).insertDocument(myObject); + System.out.println("Document created"); + } catch (final ArangoDBException e) { + System.err.println("Failed to create document. " + e.getMessage()); + } + + // read a document + try { + final BaseDocument myDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", + BaseDocument.class); + System.out.println("Key: " + myDocument.getKey()); + System.out.println("Attribute a: " + myDocument.getAttribute("a")); + System.out.println("Attribute b: " + myDocument.getAttribute("b")); + } catch (final ArangoDBException e) { + System.err.println("Failed to get document: myKey; " + e.getMessage()); + } + + // read a document as JsonNode + try { + final JsonNode myDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", + JsonNode.class); + System.out.println("Key: " + myDocument.get("_key").textValue()); + System.out.println("Attribute a: " + myDocument.get("a").textValue()); + System.out.println("Attribute b: " + myDocument.get("b").textValue()); + } catch (final ArangoDBException e) { + System.err.println("Failed to get document: myKey; " + e.getMessage()); + } + + // update a document + myObject.addAttribute("c", "Bar"); + try { + arangoDB.db(dbName).collection(collectionName).updateDocument("myKey", myObject); + } catch (final ArangoDBException e) { + System.err.println("Failed to update document. 
" + e.getMessage()); + } + + // read the document again + try { + final BaseDocument myUpdatedDocument = arangoDB.db(dbName).collection(collectionName).getDocument("myKey", + BaseDocument.class); + System.out.println("Key: " + myUpdatedDocument.getKey()); + System.out.println("Attribute a: " + myUpdatedDocument.getAttribute("a")); + System.out.println("Attribute b: " + myUpdatedDocument.getAttribute("b")); + System.out.println("Attribute c: " + myUpdatedDocument.getAttribute("c")); + } catch (final ArangoDBException e) { + System.err.println("Failed to get document: myKey; " + e.getMessage()); + } + + // delete a document + try { + arangoDB.db(dbName).collection(collectionName).deleteDocument("myKey"); + } catch (final ArangoDBException e) { + System.err.println("Failed to delete document. " + e.getMessage()); + } + + // create some documents for the next step + final ArangoCollection collection = arangoDB.db(dbName).collection(collectionName); + for (int i = 0; i < 10; i++) { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.setKey(String.valueOf(i)); + value.addAttribute("name", "Homer"); + collection.insertDocument(value); + } + + // execute AQL queries + try { + final String query = "FOR t IN firstCollection FILTER t.name == @name RETURN t"; + final Map bindVars = Collections.singletonMap("name", "Homer"); + final ArangoCursor cursor = arangoDB.db(dbName).query(query, BaseDocument.class, bindVars); + while (cursor.hasNext()) { + System.out.println("Key: " + cursor.next().getKey()); + } + } catch (final ArangoDBException e) { + System.err.println("Failed to execute query. 
" + e.getMessage()); + } + + // delete a document with AQL + try { + final String query = "FOR t IN firstCollection FILTER t.name == @name " + + "REMOVE t IN firstCollection LET removed = OLD RETURN removed"; + final Map bindVars = Collections.singletonMap("name", "Homer"); + final ArangoCursor cursor = arangoDB.db(dbName).query(query, BaseDocument.class, bindVars); + while (cursor.hasNext()) { + System.out.println("Removed document " + cursor.next().getKey()); + } + } catch (final ArangoDBException e) { + System.err.println("Failed to execute query. " + e.getMessage()); + } + + } + +} diff --git a/test-non-functional/src/test/java/example/document/AqlQueryWithSpecialReturnTypesExampleTest.java b/test-non-functional/src/test/java/example/document/AqlQueryWithSpecialReturnTypesExampleTest.java new file mode 100644 index 000000000..8f93d4cb0 --- /dev/null +++ b/test-non-functional/src/test/java/example/document/AqlQueryWithSpecialReturnTypesExampleTest.java @@ -0,0 +1,132 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.document; + +import com.arangodb.ArangoCursor; +import com.arangodb.entity.BaseDocument; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import example.ExampleBase; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + */ +class AqlQueryWithSpecialReturnTypesExampleTest extends ExampleBase { + + @BeforeAll + static void before() { + createExamples(); + } + + private static void createExamples() { + for (int i = 0; i < 100; i++) { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.addAttribute("name", "TestUser" + i); + value.addAttribute("gender", (i % 2) == 0 ? Gender.MALE : Gender.FEMALE); + value.addAttribute("age", i + 10); + db.collection(COLLECTION_NAME).insertDocument(value); + } + } + + @Test + void aqlWithLimitQueryAsJsonObject() { + final String query = "FOR t IN " + COLLECTION_NAME + + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN t"; + final Map bindVars = Collections.singletonMap("gender", Gender.FEMALE); + final ArangoCursor cursor = db.query(query, ObjectNode.class, bindVars); + assertThat((Object) cursor).isNotNull(); + while (cursor.hasNext()) { + final ObjectNode node = cursor.next(); + assertThat(node.get("name").asText()) + .isIn("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19"); + assertThat(node.get("gender").asText()).isEqualTo(Gender.FEMALE.name()); + assertThat(node.get("age").asInt()).isIn(21, 23, 25, 27, 29); + } + } + + @Test + void aqlWithLimitQueryAsJsonArray() { + final String query = "FOR t IN " + COLLECTION_NAME + + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN [t.name, 
t.gender, t.age]"; + final Map bindVars = Collections.singletonMap("gender", Gender.FEMALE); + final ArangoCursor cursor = db.query(query, ArrayNode.class, bindVars); + assertThat((Object) cursor).isNotNull(); + while (cursor.hasNext()) { + final ArrayNode arrNode = cursor.next(); + assertThat(arrNode.get(0).asText()) + .isIn("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19"); + assertThat(arrNode.get(1).asText()).isEqualTo(Gender.FEMALE.name()); + assertThat(arrNode.get(2).asInt()).isIn(21, 23, 25, 27, 29); + } + } + + @Test + void aqlWithLimitQueryAsMap() { + final String query = "FOR t IN " + COLLECTION_NAME + + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN t"; + final Map bindVars = Collections.singletonMap("gender", Gender.FEMALE); + final ArangoCursor cursor = db.query(query, Map.class, bindVars); + assertThat((Object) cursor).isNotNull(); + while (cursor.hasNext()) { + final Map map = cursor.next(); + assertThat(map.get("name")).isNotNull(); + assertThat(String.valueOf(map.get("name"))) + .isIn("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19"); + assertThat(map.get("gender")).isNotNull(); + assertThat(String.valueOf(map.get("gender"))).isEqualTo(Gender.FEMALE.name()); + assertThat(map.get("age")).isNotNull(); + assertThat(Long.valueOf(map.get("age").toString())).isIn(21L, 23L, 25L, 27L, 29L); + } + } + + @Test + void aqlWithLimitQueryAsList() { + final String query = "FOR t IN " + COLLECTION_NAME + + " FILTER t.age >= 20 && t.age < 30 && t.gender == @gender RETURN [t.name, t.gender, t.age]"; + final Map bindVars = Collections.singletonMap("gender", Gender.FEMALE); + final ArangoCursor cursor = db.query(query, List.class, bindVars); + assertThat((Object) cursor).isNotNull(); + while (cursor.hasNext()) { + final List list = cursor.next(); + assertThat(list.get(0)).isNotNull(); + assertThat(String.valueOf(list.get(0))) + .isIn("TestUser11", "TestUser13", "TestUser15", "TestUser17", "TestUser19"); + 
assertThat(list.get(1)).isNotNull(); + assertThat(Gender.valueOf(String.valueOf(list.get(1)))).isEqualTo(Gender.FEMALE); + assertThat(list.get(2)).isNotNull(); + assertThat(Long.valueOf(String.valueOf(list.get(2)))).isIn(21L, 23L, 25L, 27L, 29L); + } + } + + enum Gender { + MALE, FEMALE + } +} diff --git a/test-non-functional/src/test/java/example/document/GetDocumentExampleTest.java b/test-non-functional/src/test/java/example/document/GetDocumentExampleTest.java new file mode 100644 index 000000000..e4e470028 --- /dev/null +++ b/test-non-functional/src/test/java/example/document/GetDocumentExampleTest.java @@ -0,0 +1,101 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.document; + +import com.arangodb.RequestContext; +import com.arangodb.entity.BaseDocument; +import com.arangodb.internal.RequestContextHolder; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.JsonNode; +import example.ExampleBase; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.util.Map; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + */ +class GetDocumentExampleTest extends ExampleBase { + + private static String key = null; + + @BeforeAll + static void before() { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.addAttribute("foo", "bar"); + key = collection.insertDocument(value).getKey(); + } + + @Test + void getAsBean() { + final TestEntity doc = collection.getDocument(key, TestEntity.class); + assertThat(doc).isNotNull(); + assertThat(doc.getFoo()).isEqualTo("bar"); + } + + @Test + void getAsBaseDocument() { + final BaseDocument doc = collection.getDocument(key, BaseDocument.class); + assertThat(doc).isNotNull(); + assertThat(doc.getAttribute("foo")).isNotNull(); + assertThat(String.valueOf(doc.getAttribute("foo"))).isEqualTo("bar"); + } + + @Test + void getAsMap() { + final Map doc = collection.getDocument(key, Map.class); + assertThat(doc).isNotNull(); + assertThat(doc.get("foo")).isNotNull(); + assertThat(String.valueOf(doc.get("foo"))).isEqualTo("bar"); + } + + @Test + void getAsJsonNode() { + final JsonNode doc = collection.getDocument(key, JsonNode.class); + assertThat(doc).isNotNull(); + assertThat(doc.get("foo").isTextual()).isTrue(); + assertThat(doc.get("foo").asText()).isEqualTo("bar"); + } + + @Test + void getAsJson() { + final RawJson doc = collection.getDocument(key, RawJson.class); + assertThat(doc.get()).isNotNull() + .contains("foo") + 
.contains("bar"); + } + + @Test + void getAsBytes() { + final RawBytes doc = collection.getDocument(key, RawBytes.class); + assertThat(doc.get()).isNotNull(); + Map mapDoc = RequestContextHolder.INSTANCE.runWithCtx(RequestContext.EMPTY, () -> + collection.getSerde().deserializeUserData(doc.get(), Map.class)); + assertThat(mapDoc).containsEntry("foo", "bar"); + } + +} diff --git a/test-non-functional/src/test/java/example/document/InsertDocumentExampleTest.java b/test-non-functional/src/test/java/example/document/InsertDocumentExampleTest.java new file mode 100644 index 000000000..60921d310 --- /dev/null +++ b/test-non-functional/src/test/java/example/document/InsertDocumentExampleTest.java @@ -0,0 +1,70 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.document; + +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.DocumentCreateEntity; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import example.ExampleBase; +import org.junit.jupiter.api.Test; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + */ +class InsertDocumentExampleTest extends ExampleBase { + + @Test + void insertBean() { + final DocumentCreateEntity doc = collection.insertDocument(new TestEntity("bar")); + assertThat(doc.getKey()).isNotNull(); + } + + @Test + void insertBaseDocument() { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.addAttribute("foo", "bar"); + final DocumentCreateEntity doc = collection.insertDocument(value); + assertThat(doc.getKey()).isNotNull(); + } + + @Test + void insertJsonNode() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("foo", "bar"); + final DocumentCreateEntity doc = collection.insertDocument(node); + assertThat(doc.getKey()).isNotNull(); + } + + @Test + void insertJson() { + final DocumentCreateEntity doc = collection.insertDocument(RawJson.of("{\"foo\":\"bar\"}")); + assertThat(doc.getKey()).isNotNull(); + } + +} diff --git a/src/test/java/com/arangodb/example/document/TestEntity.java b/test-non-functional/src/test/java/example/document/TestEntity.java similarity index 70% rename from src/test/java/com/arangodb/example/document/TestEntity.java rename to test-non-functional/src/test/java/example/document/TestEntity.java index 774aef2be..fc72c3230 100644 --- a/src/test/java/com/arangodb/example/document/TestEntity.java +++ b/test-non-functional/src/test/java/example/document/TestEntity.java @@ -1,48 +1,47 @@ -/* - * DISCLAIMER - * - * Copyright 2016 
ArangoDB GmbH, Cologne, Germany - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright holder is ArangoDB GmbH, Cologne, Germany - */ - -package com.arangodb.example.document; - -/** - * @author Mark Vollmary - * - */ -public class TestEntity { - - private String foo; - - public TestEntity() { - super(); - } - - public TestEntity(final String foo) { - super(); - this.foo = foo; - } - - public String getFoo() { - return foo; - } - - public void setFoo(final String foo) { - this.foo = foo; - } - -} +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.document; + +/** + * @author Mark Vollmary + */ +public class TestEntity { + + private String foo; + + public TestEntity() { + super(); + } + + public TestEntity(final String foo) { + super(); + this.foo = foo; + } + + public String getFoo() { + return foo; + } + + public void setFoo(final String foo) { + this.foo = foo; + } + +} diff --git a/test-non-functional/src/test/java/example/graph/AQLActorsAndMoviesExampleTest.java b/test-non-functional/src/test/java/example/graph/AQLActorsAndMoviesExampleTest.java new file mode 100644 index 000000000..4088f8454 --- /dev/null +++ b/test-non-functional/src/test/java/example/graph/AQLActorsAndMoviesExampleTest.java @@ -0,0 +1,562 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.*; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.BaseEdgeDocument; +import com.arangodb.entity.CollectionType; +import com.arangodb.entity.DocumentCreateEntity; +import com.arangodb.model.CollectionCreateOptions; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import util.TestUtils; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * @author Mark Vollmary + * @see + * AQL Example Queries on an + * Actors and Movies Database + */ +class AQLActorsAndMoviesExampleTest { + + private static final String TEST_DB = "actors_movies_test_db"; + private static ArangoDB arangoDB; + private static ArangoDatabase db; + + @BeforeAll + static void setUp() { + ArangoConfigProperties config = ArangoConfigProperties.fromFile(); + arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .serde(TestUtils.createSerde(config.getProtocol().orElse(Protocol.HTTP2_JSON))) + .build(); + if (arangoDB.db(TEST_DB).exists()) + arangoDB.db(TEST_DB).drop(); + arangoDB.createDatabase(TEST_DB); + db = arangoDB.db(TEST_DB); + createData(); + } + + @AfterAll + static void tearDown() { + db.drop(); + arangoDB.shutdown(); + } + + private static DocumentCreateEntity saveMovie( + final ArangoCollection movies, + final String key, + final String title, + final int released, + final String tagline) { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.setKey(key); + value.addAttribute("title", title); + value.addAttribute("released", released); + value.addAttribute("tagline", tagline); + return movies.insertDocument(value); + } + + private static 
DocumentCreateEntity saveActor( + final ArangoCollection actors, + final String key, + final String name, + final int born) { + final BaseDocument value = new BaseDocument(UUID.randomUUID().toString()); + value.setKey(key); + value.addAttribute("name", name); + value.addAttribute("born", born); + return actors.insertDocument(value); + } + + private static void saveActsIn( + final ArangoCollection actsIn, + final String actor, + final String movie, + final String[] roles, + final int year) { + final BaseEdgeDocument value = new BaseEdgeDocument(); + value.setFrom(actor); + value.setTo(movie); + value.addAttribute("roles", roles); + value.addAttribute("year", year); + actsIn.insertDocument(value); + } + + private static void createData() { + db.createCollection("actors"); + final ArangoCollection actors = db.collection("actors"); + db.createCollection("movies"); + final ArangoCollection movies = db.collection("movies"); + db.createCollection("actsIn", new CollectionCreateOptions().type(CollectionType.EDGES)); + final ArangoCollection actsIn = db.collection("actsIn"); + + final String theMatrix = saveMovie(movies, "TheMatrix", "The Matrix", 1999, "Welcome to the Real World") + .getId(); + final String keanu = saveActor(actors, "Keanu", "Keanu Reeves", 1964).getId(); + final String carrie = saveActor(actors, "Carrie", "Carrie-Anne Moss", 1967).getId(); + final String laurence = saveActor(actors, "Laurence", "Laurence Fishburne", 1961).getId(); + final String hugo = saveActor(actors, "Hugo", "Hugo Weaving", 1960).getId(); + final String emil = saveActor(actors, "Emil", "Emil Eifrem", 1978).getId(); + + saveActsIn(actsIn, keanu, theMatrix, new String[]{"Neo"}, 1999); + saveActsIn(actsIn, carrie, theMatrix, new String[]{"Trinity"}, 1999); + saveActsIn(actsIn, laurence, theMatrix, new String[]{"Morpheus"}, 1999); + saveActsIn(actsIn, hugo, theMatrix, new String[]{"Agent Smith"}, 1999); + saveActsIn(actsIn, emil, theMatrix, new String[]{"Emil"}, 1999); + + final String 
theMatrixReloaded = saveMovie(movies, "TheMatrixReloaded", "The Matrix Reloaded", 2003, + "Free your mind").getId(); + saveActsIn(actsIn, keanu, theMatrixReloaded, new String[]{"Neo"}, 2003); + saveActsIn(actsIn, carrie, theMatrixReloaded, new String[]{"Trinity"}, 2003); + saveActsIn(actsIn, laurence, theMatrixReloaded, new String[]{"Morpheus"}, 2003); + saveActsIn(actsIn, hugo, theMatrixReloaded, new String[]{"Agent Smith"}, 2003); + + final String theMatrixRevolutions = saveMovie(movies, "TheMatrixRevolutions", "The Matrix Revolutions", 2003, + "Everything that has a beginning has an end").getId(); + saveActsIn(actsIn, keanu, theMatrixRevolutions, new String[]{"Neo"}, 2003); + saveActsIn(actsIn, carrie, theMatrixRevolutions, new String[]{"Trinity"}, 2003); + saveActsIn(actsIn, laurence, theMatrixRevolutions, new String[]{"Morpheus"}, 2003); + saveActsIn(actsIn, hugo, theMatrixRevolutions, new String[]{"Agent Smith"}, 2003); + + final String theDevilsAdvocate = saveMovie(movies, "TheDevilsAdvocate", "The Devil's Advocate", 1997, + "Evil has its winning ways").getId(); + final String charlize = saveActor(actors, "Charlize", "Charlize Theron", 1975).getId(); + final String al = saveActor(actors, "Al", "Al Pacino", 1940).getId(); + saveActsIn(actsIn, keanu, theDevilsAdvocate, new String[]{"Kevin Lomax"}, 1997); + saveActsIn(actsIn, charlize, theDevilsAdvocate, new String[]{"Mary Ann Lomax"}, 1997); + saveActsIn(actsIn, al, theDevilsAdvocate, new String[]{"John Milton"}, 1997); + + final String AFewGoodMen = saveMovie(movies, "AFewGoodMen", "A Few Good Men", 1992, + "In the heart of the nation's capital, in a courthouse of the U.S. 
government, one man will stop at " + + "nothing to keep his honor, and one will stop at nothing to find the truth.") + .getId(); + final String tomC = saveActor(actors, "TomC", "Tom Cruise", 1962).getId(); + final String jackN = saveActor(actors, "JackN", "Jack Nicholson", 1937).getId(); + final String demiM = saveActor(actors, "DemiM", "Demi Moore", 1962).getId(); + final String kevinB = saveActor(actors, "KevinB", "Kevin Bacon", 1958).getId(); + final String kieferS = saveActor(actors, "KieferS", "Kiefer Sutherland", 1966).getId(); + final String noahW = saveActor(actors, "NoahW", "Noah Wyle", 1971).getId(); + final String cubaG = saveActor(actors, "CubaG", "Cuba Gooding Jr.", 1968).getId(); + final String kevinP = saveActor(actors, "KevinP", "Kevin Pollak", 1957).getId(); + final String jTW = saveActor(actors, "JTW", "J.T. Walsh", 1943).getId(); + final String jamesM = saveActor(actors, "JamesM", "James Marshall", 1967).getId(); + final String christopherG = saveActor(actors, "ChristopherG", "Christopher Guest", 1948).getId(); + saveActsIn(actsIn, tomC, AFewGoodMen, new String[]{"Lt. Daniel Kaffee"}, 1992); + saveActsIn(actsIn, jackN, AFewGoodMen, new String[]{"Col. Nathan R. Jessup"}, 1992); + saveActsIn(actsIn, demiM, AFewGoodMen, new String[]{"Lt. Cdr. JoAnne Galloway"}, 1992); + saveActsIn(actsIn, kevinB, AFewGoodMen, new String[]{"Capt. Jack Ross"}, 1992); + saveActsIn(actsIn, kieferS, AFewGoodMen, new String[]{"Lt. Jonathan Kendrick"}, 1992); + saveActsIn(actsIn, noahW, AFewGoodMen, new String[]{"Cpl. Jeffrey Barnes"}, 1992); + saveActsIn(actsIn, cubaG, AFewGoodMen, new String[]{"Cpl. Carl Hammaker"}, 1992); + saveActsIn(actsIn, kevinP, AFewGoodMen, new String[]{"Lt. Sam Weinberg"}, 1992); + saveActsIn(actsIn, jTW, AFewGoodMen, new String[]{"Lt. Col. Matthew Andrew Markinson"}, 1992); + saveActsIn(actsIn, jamesM, AFewGoodMen, new String[]{"Pfc. Louden Downey"}, 1992); + saveActsIn(actsIn, christopherG, AFewGoodMen, new String[]{"Dr. 
Stone"}, 1992); + + final String topGun = saveMovie(movies, "TopGun", "Top Gun", 1986, "I feel the need, the need for speed.") + .getId(); + final String kellyM = saveActor(actors, "KellyM", "Kelly McGillis", 1957).getId(); + final String valK = saveActor(actors, "ValK", "Val Kilmer", 1959).getId(); + final String anthonyE = saveActor(actors, "AnthonyE", "Anthony Edwards", 1962).getId(); + final String tomS = saveActor(actors, "TomS", "Tom Skerritt", 1933).getId(); + final String megR = saveActor(actors, "MegR", "Meg Ryan", 1961).getId(); + saveActsIn(actsIn, tomC, topGun, new String[]{"Maverick"}, 1986); + saveActsIn(actsIn, kellyM, topGun, new String[]{"Charlie"}, 1986); + saveActsIn(actsIn, valK, topGun, new String[]{"Iceman"}, 1986); + saveActsIn(actsIn, anthonyE, topGun, new String[]{"Goose"}, 1986); + saveActsIn(actsIn, tomS, topGun, new String[]{"Viper"}, 1986); + saveActsIn(actsIn, megR, topGun, new String[]{"Carole"}, 1986); + + final String jerryMaguire = saveMovie(movies, "JerryMaguire", "Jerry Maguire", 2000, + "The rest of his life begins now.").getId(); + final String reneeZ = saveActor(actors, "ReneeZ", "Renee Zellweger", 1969).getId(); + final String kellyP = saveActor(actors, "KellyP", "Kelly Preston", 1962).getId(); + final String jerryO = saveActor(actors, "JerryO", "Jerry O'Connell", 1974).getId(); + final String jayM = saveActor(actors, "JayM", "Jay Mohr", 1970).getId(); + final String bonnieH = saveActor(actors, "BonnieH", "Bonnie Hunt", 1961).getId(); + final String reginaK = saveActor(actors, "ReginaK", "Regina King", 1971).getId(); + final String jonathanL = saveActor(actors, "JonathanL", "Jonathan Lipnicki", 1996).getId(); + saveActsIn(actsIn, tomC, jerryMaguire, new String[]{"Jerry Maguire"}, 2000); + saveActsIn(actsIn, cubaG, jerryMaguire, new String[]{"Rod Tidwell"}, 2000); + saveActsIn(actsIn, reneeZ, jerryMaguire, new String[]{"Dorothy Boyd"}, 2000); + saveActsIn(actsIn, kellyP, jerryMaguire, new String[]{"Avery Bishop"}, 2000); + 
saveActsIn(actsIn, jerryO, jerryMaguire, new String[]{"Frank Cushman"}, 2000); + saveActsIn(actsIn, jayM, jerryMaguire, new String[]{"Bob Sugar"}, 2000); + saveActsIn(actsIn, bonnieH, jerryMaguire, new String[]{"Laurel Boyd"}, 2000); + saveActsIn(actsIn, reginaK, jerryMaguire, new String[]{"Marcee Tidwell"}, 2000); + saveActsIn(actsIn, jonathanL, jerryMaguire, new String[]{"Ray Boyd"}, 2000); + + final String standByMe = saveMovie(movies, "StandByMe", "Stand By Me", 1986, + "For some, it's the last real taste of innocence, and the first real taste of life. But for everyone," + + " it's the time that memories are made of.") + .getId(); + final String riverP = saveActor(actors, "RiverP", "River Phoenix", 1970).getId(); + final String coreyF = saveActor(actors, "CoreyF", "Corey Feldman", 1971).getId(); + final String wilW = saveActor(actors, "WilW", "Wil Wheaton", 1972).getId(); + final String johnC = saveActor(actors, "JohnC", "John Cusack", 1966).getId(); + final String marshallB = saveActor(actors, "MarshallB", "Marshall Bell", 1942).getId(); + saveActsIn(actsIn, wilW, standByMe, new String[]{"Gordie Lachance"}, 1986); + saveActsIn(actsIn, riverP, standByMe, new String[]{"Chris Chambers"}, 1986); + saveActsIn(actsIn, jerryO, standByMe, new String[]{"Vern Tessio"}, 1986); + saveActsIn(actsIn, coreyF, standByMe, new String[]{"Teddy Duchamp"}, 1986); + saveActsIn(actsIn, johnC, standByMe, new String[]{"Denny Lachance"}, 1986); + saveActsIn(actsIn, kieferS, standByMe, new String[]{"Ace Merrill"}, 1986); + saveActsIn(actsIn, marshallB, standByMe, new String[]{"Mr. 
Lachance"}, 1986); + + final String asGoodAsItGets = saveMovie(movies, "AsGoodAsItGets", "As Good as It Gets", 1997, + "A comedy from the heart that goes for the throat.").getId(); + final String helenH = saveActor(actors, "HelenH", "Helen Hunt", 1963).getId(); + final String gregK = saveActor(actors, "GregK", "Greg Kinnear", 1963).getId(); + saveActsIn(actsIn, jackN, asGoodAsItGets, new String[]{"Melvin Udall"}, 1997); + saveActsIn(actsIn, helenH, asGoodAsItGets, new String[]{"Carol Connelly"}, 1997); + saveActsIn(actsIn, gregK, asGoodAsItGets, new String[]{"Simon Bishop"}, 1997); + saveActsIn(actsIn, cubaG, asGoodAsItGets, new String[]{"Frank Sachs"}, 1997); + + final String whatDreamsMayCome = saveMovie(movies, "WhatDreamsMayCome", "What Dreams May Come", 1998, + "After life there is more. The end is just the beginning.").getId(); + final String annabellaS = saveActor(actors, "AnnabellaS", "Annabella Sciorra", 1960).getId(); + final String maxS = saveActor(actors, "MaxS", "Max von Sydow", 1929).getId(); + final String wernerH = saveActor(actors, "WernerH", "Werner Herzog", 1942).getId(); + final String robin = saveActor(actors, "Robin", "Robin Williams", 1951).getId(); + saveActsIn(actsIn, robin, whatDreamsMayCome, new String[]{"Chris Nielsen"}, 1998); + saveActsIn(actsIn, cubaG, whatDreamsMayCome, new String[]{"Albert Lewis"}, 1998); + saveActsIn(actsIn, annabellaS, whatDreamsMayCome, new String[]{"Annie Collins-Nielsen"}, 1998); + saveActsIn(actsIn, maxS, whatDreamsMayCome, new String[]{"The Tracker"}, 1998); + saveActsIn(actsIn, wernerH, whatDreamsMayCome, new String[]{"The Face"}, 1998); + + final String snowFallingonCedars = saveMovie(movies, "SnowFallingonCedars", "Snow Falling on Cedars", 1999, + "First loves last. 
Forever.").getId(); + final String ethanH = saveActor(actors, "EthanH", "Ethan Hawke", 1970).getId(); + final String rickY = saveActor(actors, "RickY", "Rick Yune", 1971).getId(); + final String jamesC = saveActor(actors, "JamesC", "James Cromwell", 1940).getId(); + saveActsIn(actsIn, ethanH, snowFallingonCedars, new String[]{"Ishmael Chambers"}, 1999); + saveActsIn(actsIn, rickY, snowFallingonCedars, new String[]{"Kazuo Miyamoto"}, 1999); + saveActsIn(actsIn, maxS, snowFallingonCedars, new String[]{"Nels Gudmundsson"}, 1999); + saveActsIn(actsIn, jamesC, snowFallingonCedars, new String[]{"Judge Fielding"}, 1999); + + final String youveGotMail = saveMovie(movies, "YouveGotMail", "You've Got Mail", 1998, + "At odds in life... in love on-line.").getId(); + final String parkerP = saveActor(actors, "ParkerP", "Parker Posey", 1968).getId(); + final String daveC = saveActor(actors, "DaveC", "Dave Chappelle", 1973).getId(); + final String steveZ = saveActor(actors, "SteveZ", "Steve Zahn", 1967).getId(); + final String tomH = saveActor(actors, "TomH", "Tom Hanks", 1956).getId(); + saveActsIn(actsIn, tomH, youveGotMail, new String[]{"Joe Fox"}, 1998); + saveActsIn(actsIn, megR, youveGotMail, new String[]{"Kathleen Kelly"}, 1998); + saveActsIn(actsIn, gregK, youveGotMail, new String[]{"Frank Navasky"}, 1998); + saveActsIn(actsIn, parkerP, youveGotMail, new String[]{"Patricia Eden"}, 1998); + saveActsIn(actsIn, daveC, youveGotMail, new String[]{"Kevin Jackson"}, 1998); + saveActsIn(actsIn, steveZ, youveGotMail, new String[]{"George Pappas"}, 1998); + + final String sleeplessInSeattle = saveMovie(movies, "SleeplessInSeattle", "Sleepless in Seattle", 1993, + "What if someone you never met, someone you never saw, someone you never knew was the only someone " + + "for you?") + .getId(); + final String ritaW = saveActor(actors, "RitaW", "Rita Wilson", 1956).getId(); + final String billPull = saveActor(actors, "BillPull", "Bill Pullman", 1953).getId(); + final String victorG = 
saveActor(actors, "VictorG", "Victor Garber", 1949).getId(); + final String rosieO = saveActor(actors, "RosieO", "Rosie O'Donnell", 1962).getId(); + saveActsIn(actsIn, tomH, sleeplessInSeattle, new String[]{"Sam Baldwin"}, 1993); + saveActsIn(actsIn, megR, sleeplessInSeattle, new String[]{"Annie Reed"}, 1993); + saveActsIn(actsIn, ritaW, sleeplessInSeattle, new String[]{"Suzy"}, 1993); + saveActsIn(actsIn, billPull, sleeplessInSeattle, new String[]{"Walter"}, 1993); + saveActsIn(actsIn, victorG, sleeplessInSeattle, new String[]{"Greg"}, 1993); + saveActsIn(actsIn, rosieO, sleeplessInSeattle, new String[]{"Becky"}, 1993); + + final String joeVersustheVolcano = saveMovie(movies, "JoeVersustheVolcano", "Joe Versus the Volcano", 1990, + "A story of love, lava and burning desire.").getId(); + final String nathan = saveActor(actors, "Nathan", "Nathan Lane", 1956).getId(); + saveActsIn(actsIn, tomH, joeVersustheVolcano, new String[]{"Joe Banks"}, 1990); + saveActsIn(actsIn, megR, joeVersustheVolcano, + new String[]{"DeDe', 'Angelica Graynamore', 'Patricia Graynamore"}, 1990); + saveActsIn(actsIn, nathan, joeVersustheVolcano, new String[]{"Baw"}, 1990); + + final String whenHarryMetSally = saveMovie(movies, "WhenHarryMetSally", "When Harry Met Sally", 1998, + "At odds in life... 
in love on-line.").getId(); + final String billyC = saveActor(actors, "BillyC", "Billy Crystal", 1948).getId(); + final String carrieF = saveActor(actors, "CarrieF", "Carrie Fisher", 1956).getId(); + final String brunoK = saveActor(actors, "BrunoK", "Bruno Kirby", 1949).getId(); + saveActsIn(actsIn, billyC, whenHarryMetSally, new String[]{"Harry Burns"}, 1998); + saveActsIn(actsIn, megR, whenHarryMetSally, new String[]{"Sally Albright"}, 1998); + saveActsIn(actsIn, carrieF, whenHarryMetSally, new String[]{"Marie"}, 1998); + saveActsIn(actsIn, brunoK, whenHarryMetSally, new String[]{"Jess"}, 1998); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allActorsActsInMovie1or2() { + final ArangoCursor cursor = db.query( + "WITH actors, movies FOR x IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: true, uniqueVertices: " + + "'global'} RETURN x._id", String.class); + assertThat(cursor.asListRemaining()) + .contains("actors/Keanu", "actors/Hugo", "actors/Emil", "actors/Carrie", "actors/Laurence"); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allActorsActsInMovie1or2UnionDistinct() { + final ArangoCursor cursor = db.query( + "WITH actors, movies FOR x IN UNION_DISTINCT ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: " + + "true, uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' " + + "actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", String.class); + assertThat(cursor.asListRemaining()).contains("actors/Emil", "actors/Hugo", "actors/Carrie", "actors/Laurence", + "actors/Keanu", "actors/Al", "actors/Charlize"); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allActorsActsInMovie1and2() { + final ArangoCursor cursor = db.query( + "WITH actors, movies FOR x IN INTERSECTION ((FOR y IN ANY 'movies/TheMatrix' actsIn OPTIONS {bfs: " + + "true, uniqueVertices: 
'global'} RETURN y._id), (FOR y IN ANY 'movies/TheDevilsAdvocate' " + + "actsIn OPTIONS {bfs: true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", String.class); + assertThat(cursor.asListRemaining()).contains("actors/Keanu"); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allMoviesBetweenActor1andActor2() { + final ArangoCursor cursor = db.query( + "WITH actors, movies FOR x IN INTERSECTION ((FOR y IN ANY 'actors/Hugo' actsIn OPTIONS {bfs: true, " + + "uniqueVertices: 'global'} RETURN y._id), (FOR y IN ANY 'actors/Keanu' actsIn OPTIONS {bfs: " + + "true, uniqueVertices: 'global'} RETURN y._id)) RETURN x", String.class); + assertThat(cursor.asListRemaining()) + .contains("movies/TheMatrixRevolutions", "movies/TheMatrixReloaded", "movies/TheMatrix"); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allActorsWhoActedIn3orMoreMovies() { + final ArangoCursor cursor = db.query( + "FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter FILTER counter >= 3 RETURN {actor: " + + "actor, movies: counter}", Actor.class); + assertThat(cursor.asListRemaining()) + .contains(new Actor("actors/Carrie", 3), new Actor("actors/CubaG", 4), new Actor("actors/Hugo", 3), + new Actor("actors/Keanu", 4), new Actor("actors/Laurence", 3), new Actor("actors/MegR", 5), + new Actor("actors/TomC", 3), new Actor("actors/TomH", 3)); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void allMoviesWhereExactly6ActorsActedIn() { + final ArangoCursor cursor = db.query( + "FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter FILTER counter == 6 RETURN movie", String.class); + assertThat(cursor.asListRemaining()) + .contains("movies/SleeplessInSeattle", "movies/TopGun", "movies/YouveGotMail"); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void theNumberOfActorsByMovie() { + final ArangoCursor 
cursor = db.query( + "FOR x IN actsIn COLLECT movie = x._to WITH COUNT INTO counter RETURN {movie: movie, actors: counter}", + Movie.class); + assertThat(cursor.asListRemaining()) + .contains(new Movie("movies/AFewGoodMen", 11), new Movie("movies/AsGoodAsItGets", 4), + new Movie("movies/JerryMaguire", 9), new Movie("movies/JoeVersustheVolcano", 3), + new Movie("movies/SleeplessInSeattle", 6), new Movie("movies/SnowFallingonCedars", 4), + new Movie("movies/StandByMe", 7), new Movie("movies/TheDevilsAdvocate", 3), + new Movie("movies/TheMatrix", 5), new Movie("movies/TheMatrixReloaded", 4), + new Movie("movies/TheMatrixRevolutions", 4), new Movie("movies/TopGun", 6), + new Movie("movies/WhatDreamsMayCome", 5), new Movie("movies/WhenHarryMetSally", 4), + new Movie("movies/YouveGotMail", 6)); + } + + /** + * @see AQL + * Example Queries on an Actors and Movies Database + */ + @Test + void theNumberOfMoviesByActor() { + final ArangoCursor cursor = db.query( + "FOR x IN actsIn COLLECT actor = x._from WITH COUNT INTO counter RETURN {actor: actor, movies: " + + "counter}", Actor.class); + assertThat(cursor.asListRemaining()) + .contains(new Actor("actors/Al", 1), new Actor("actors/AnnabellaS", 1), new Actor("actors/AnthonyE", 1), + new Actor("actors/BillPull", 1), new Actor("actors/BillyC", 1), new Actor("actors/BonnieH", 1), + new Actor("actors/BrunoK", 1), new Actor("actors/Carrie", 3), new Actor("actors/CarrieF", 1), + new Actor("actors/Charlize", 1), new Actor("actors/ChristopherG", 1), new Actor("actors" + + "/CoreyF", 1), + new Actor("actors/CubaG", 4), new Actor("actors/DaveC", 1), new Actor("actors/DemiM", 1), + new Actor("actors/Emil", 1), new Actor("actors/EthanH", 1), new Actor("actors/GregK", 2), + new Actor("actors/HelenH", 1), new Actor("actors/Hugo", 3), new Actor("actors/JackN", 2), + new Actor("actors/JamesC", 1), new Actor("actors/JamesM", 1), new Actor("actors/JayM", 1), + new Actor("actors/JerryO", 2), new Actor("actors/JohnC", 1), new 
Actor("actors/JonathanL", 1), + new Actor("actors/JTW", 1), new Actor("actors/Keanu", 4), new Actor("actors/KellyM", 1), + new Actor("actors/KellyP", 1), new Actor("actors/KevinB", 1), new Actor("actors/KevinP", 1), + new Actor("actors/KieferS", 2), new Actor("actors/Laurence", 3), new Actor("actors/MarshallB" + , 1), + new Actor("actors/MaxS", 2), new Actor("actors/MegR", 5), new Actor("actors/Nathan", 1), + new Actor("actors/NoahW", 1), new Actor("actors/ParkerP", 1), new Actor("actors/ReginaK", 1), + new Actor("actors/ReneeZ", 1), new Actor("actors/RickY", 1), new Actor("actors/RitaW", 1), + new Actor("actors/RiverP", 1), new Actor("actors/Robin", 1), new Actor("actors/RosieO", 1), + new Actor("actors/SteveZ", 1), new Actor("actors/TomC", 3), new Actor("actors/TomH", 3), + new Actor("actors/TomS", 1), new Actor("actors/ValK", 1), new Actor("actors/VictorG", 1), + new Actor("actors/WernerH", 1), new Actor("actors/WilW", 1)); + } + + @Test + void theNumberOfMoviesActedInBetween2005and2010byActor() { + final ArangoCursor cursor = db.query( + "FOR x IN actsIn FILTER x.year >= 1990 && x.year <= 1995 COLLECT actor = x._from WITH COUNT INTO " + + "counter RETURN {actor: actor, movies: counter}", Actor.class); + assertThat(cursor.asListRemaining()) + .contains(new Actor("actors/BillPull", 1), new Actor("actors/ChristopherG", 1), new Actor("actors" + + "/CubaG", 1), + new Actor("actors/DemiM", 1), new Actor("actors/JackN", 1), new Actor("actors/JamesM", 1), + new Actor("actors/JTW", 1), new Actor("actors/KevinB", 1), new Actor("actors/KieferS", 1), + new Actor("actors/MegR", 2), new Actor("actors/Nathan", 1), new Actor("actors/NoahW", 1), + new Actor("actors/RitaW", 1), new Actor("actors/RosieO", 1), new Actor("actors/TomC", 1), + new Actor("actors/TomH", 2), new Actor("actors/VictorG", 1)); + } + + public static class Actor { + private final String actor; + private final Integer movies; + + @JsonCreator + Actor(@JsonProperty("actor") final String actor, 
@JsonProperty("movies") final Integer movies) { + super(); + this.actor = actor; + this.movies = movies; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((actor == null) ? 0 : actor.hashCode()); + result = prime * result + ((movies == null) ? 0 : movies.hashCode()); + return result; + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Actor other = (Actor) obj; + if (actor == null) { + if (other.actor != null) { + return false; + } + } else if (!actor.equals(other.actor)) { + return false; + } + if (movies == null) { + return other.movies == null; + } else return movies.equals(other.movies); + } + + } + + public static class Movie { + private final String movie; + private final Integer actors; + + @JsonCreator + public Movie(@JsonProperty("movie") final String movie, @JsonProperty("actors") final Integer actors) { + super(); + this.movie = movie; + this.actors = actors; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((actors == null) ? 0 : actors.hashCode()); + result = prime * result + ((movie == null) ? 
0 : movie.hashCode()); + return result; + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final Movie other = (Movie) obj; + if (actors == null) { + if (other.actors != null) { + return false; + } + } else if (!actors.equals(other.actors)) { + return false; + } + if (movie == null) { + return other.movie == null; + } else return movie.equals(other.movie); + } + + } + +} diff --git a/test-non-functional/src/test/java/example/graph/BaseGraphTest.java b/test-non-functional/src/test/java/example/graph/BaseGraphTest.java new file mode 100644 index 000000000..134885b00 --- /dev/null +++ b/test-non-functional/src/test/java/example/graph/BaseGraphTest.java @@ -0,0 +1,117 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBException; +import com.arangodb.ArangoDatabase; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.EdgeDefinition; +import com.arangodb.entity.VertexEntity; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import util.TestUtils; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * @author Mark Vollmary + */ +abstract class BaseGraphTest { + + private static final String TEST_DB = "java_driver_graph_test_db"; + private static final String GRAPH_NAME = "traversalGraph"; + private static final String EDGE_COLLECTION_NAME = "edges"; + private static final String VERTEX_COLLECTION_NAME = "circles"; + static ArangoDatabase db; + private static ArangoDB arangoDB; + + @BeforeAll + static void init() { + if (arangoDB == null) { + ArangoConfigProperties config = ArangoConfigProperties.fromFile(); + arangoDB = new ArangoDB.Builder() + .loadProperties(config) + .serde(TestUtils.createSerde(config.getProtocol().orElse(Protocol.HTTP2_JSON))) + .build(); + } + if (arangoDB.db(TEST_DB).exists()) + arangoDB.db(TEST_DB).drop(); + arangoDB.createDatabase(TEST_DB); + BaseGraphTest.db = arangoDB.db(TEST_DB); + + final Collection edgeDefinitions = new ArrayList<>(); + final EdgeDefinition edgeDefinition = new EdgeDefinition().collection(EDGE_COLLECTION_NAME) + .from(VERTEX_COLLECTION_NAME).to(VERTEX_COLLECTION_NAME); + edgeDefinitions.add(edgeDefinition); + if (!db.graph(GRAPH_NAME).exists()) + db.createGraph(GRAPH_NAME, edgeDefinitions, null); + addExampleElements(); + } + + @AfterAll + static void shutdown() { + arangoDB.db(TEST_DB).drop(); + arangoDB.shutdown(); + arangoDB = null; + } + + private static void addExampleElements() throws ArangoDBException { + + // Add circle circles + final VertexEntity vA = createVertex(new 
Circle("A", "1")); + final VertexEntity vB = createVertex(new Circle("B", "2")); + final VertexEntity vC = createVertex(new Circle("C", "3")); + final VertexEntity vD = createVertex(new Circle("D", "4")); + final VertexEntity vE = createVertex(new Circle("E", "5")); + final VertexEntity vF = createVertex(new Circle("F", "6")); + final VertexEntity vG = createVertex(new Circle("G", "7")); + final VertexEntity vH = createVertex(new Circle("H", "8")); + final VertexEntity vI = createVertex(new Circle("I", "9")); + final VertexEntity vJ = createVertex(new Circle("J", "10")); + final VertexEntity vK = createVertex(new Circle("K", "11")); + + // Add relevant edges - left branch: + saveEdge(new CircleEdge(vA.getId(), vB.getId(), false, true, "left_bar")); + saveEdge(new CircleEdge(vB.getId(), vC.getId(), false, true, "left_blarg")); + saveEdge(new CircleEdge(vC.getId(), vD.getId(), false, true, "left_blorg")); + saveEdge(new CircleEdge(vB.getId(), vE.getId(), false, true, "left_blub")); + saveEdge(new CircleEdge(vE.getId(), vF.getId(), false, true, "left_schubi")); + + // Add relevant edges - right branch: + saveEdge(new CircleEdge(vA.getId(), vG.getId(), false, true, "right_foo")); + saveEdge(new CircleEdge(vG.getId(), vH.getId(), false, true, "right_blob")); + saveEdge(new CircleEdge(vH.getId(), vI.getId(), false, true, "right_blub")); + saveEdge(new CircleEdge(vG.getId(), vJ.getId(), false, true, "right_zip")); + saveEdge(new CircleEdge(vJ.getId(), vK.getId(), false, true, "right_zup")); + } + + private static void saveEdge(final CircleEdge edge) throws ArangoDBException { + db.graph(GRAPH_NAME).edgeCollection(EDGE_COLLECTION_NAME).insertEdge(edge); + } + + private static VertexEntity createVertex(final Circle vertex) throws ArangoDBException { + return db.graph(GRAPH_NAME).vertexCollection(VERTEX_COLLECTION_NAME).insertVertex(vertex); + } + +} diff --git a/test-non-functional/src/test/java/example/graph/Circle.java 
b/test-non-functional/src/test/java/example/graph/Circle.java new file mode 100644 index 000000000..a607998aa --- /dev/null +++ b/test-non-functional/src/test/java/example/graph/Circle.java @@ -0,0 +1,80 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.serde.jackson.Id; +import com.arangodb.serde.jackson.Key; +import com.arangodb.serde.jackson.Rev; + +/** + * @author a-brandt + */ +class Circle { + + @Id + private String id; + + @Key + private String key; + + @Rev + private String revision; + + private String label; + + public Circle(String key, String label) { + this.key = key; + this.label = label; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getRevision() { + return revision; + } + + public void setRevision(String revision) { + this.revision = revision; + } + + public String getLabel() { + return label; + } + + public void setLabel(String label) { + this.label = label; + } + +} diff --git a/test-non-functional/src/test/java/example/graph/CircleEdge.java b/test-non-functional/src/test/java/example/graph/CircleEdge.java new file mode 100644 index 000000000..72c3a4afa --- /dev/null +++ 
b/test-non-functional/src/test/java/example/graph/CircleEdge.java @@ -0,0 +1,122 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.serde.jackson.*; + +/** + * @author a-brandt + */ +class CircleEdge { + + @Id + private String id; + + @Key + private String key; + + @Rev + private String revision; + + @From + private String from; + + @To + private String to; + + private Boolean theFalse; + private Boolean theTruth; + private String label; + + public CircleEdge(final String from, final String to, final Boolean theFalse, final Boolean theTruth, + final String label) { + this.from = from; + this.to = to; + this.theFalse = theFalse; + this.theTruth = theTruth; + this.label = label; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getRevision() { + return revision; + } + + public void setRevision(String revision) { + this.revision = revision; + } + + public String getFrom() { + return from; + } + + public void setFrom(String from) { + this.from = from; + } + + public String getTo() { + return to; + } + + public void setTo(String to) { + this.to = to; + } + + public Boolean getTheFalse() { + return 
theFalse; + } + + public void setTheFalse(Boolean theFalse) { + this.theFalse = theFalse; + } + + public Boolean getTheTruth() { + return theTruth; + } + + public void setTheTruth(Boolean theTruth) { + this.theTruth = theTruth; + } + + public String getLabel() { + return label; + } + + public void setLabel(String label) { + this.label = label; + } + +} diff --git a/test-non-functional/src/test/java/example/graph/GraphTraversalsInAQLExampleTest.java b/test-non-functional/src/test/java/example/graph/GraphTraversalsInAQLExampleTest.java new file mode 100644 index 000000000..f02411d70 --- /dev/null +++ b/test-non-functional/src/test/java/example/graph/GraphTraversalsInAQLExampleTest.java @@ -0,0 +1,113 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.ArangoCursor; +import com.arangodb.ArangoDBException; +import org.junit.jupiter.api.Test; + +import java.util.Collection; + +import static org.assertj.core.api.Assertions.assertThat; + + +/** + * Graph traversals in AQL + * + * @author a-brandt + * @see Graph traversals in AQL + */ +class GraphTraversalsInAQLExampleTest extends BaseGraphTest { + + @Test + void queryAllVertices() throws ArangoDBException { + String queryString = "FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' RETURN v._key"; + ArangoCursor cursor = db.query(queryString, String.class); + Collection result = cursor.asListRemaining(); + assertThat(result).hasSize(10); + + queryString = "WITH circles FOR v IN 1..3 OUTBOUND 'circles/A' edges RETURN v._key"; + cursor = db.query(queryString, String.class); + result = cursor.asListRemaining(); + assertThat(result).hasSize(10); + } + + @Test + void queryDepthTwo() throws ArangoDBException { + String queryString = "FOR v IN 2..2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' return v._key"; + ArangoCursor cursor = db.query(queryString, String.class); + Collection result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("C", "E", "H", "J"); + + queryString = "FOR v IN 2 OUTBOUND 'circles/A' GRAPH 'traversalGraph' return v._key"; + cursor = db.query(queryString, String.class); + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("C", "E", "H", "J"); + } + + @Test + void queryWithFilter() throws ArangoDBException { + String queryString = "FOR v, e, p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]" + + "._key != 'G' RETURN v._key"; + ArangoCursor cursor = db.query(queryString, String.class); + Collection result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("B", "C", "D", "E", "F"); + + queryString = "FOR v, e, p IN 1..3 OUTBOUND 
'circles/A' GRAPH 'traversalGraph' FILTER p.edges[0].label != " + + "'right_foo' RETURN v._key"; + cursor = db.query(queryString, String.class); + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("B", "C", "D", "E", "F"); + + queryString = "FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G'" + + " FILTER p.edges[1].label != 'left_blub' return v._key"; + cursor = db.query(queryString, String.class); + + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("B", "C", "D"); + + queryString = "FOR v,e,p IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' FILTER p.vertices[1]._key != 'G'" + + " AND p.edges[1].label != 'left_blub' return v._key"; + cursor = db.query(queryString, String.class); + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("B", "C", "D"); + } + + @Test + void queryOutboundInbound() throws ArangoDBException { + String queryString = "FOR v IN 1..3 OUTBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"; + ArangoCursor cursor = db.query(queryString, String.class); + Collection result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("F"); + + queryString = "FOR v IN 1..3 INBOUND 'circles/E' GRAPH 'traversalGraph' return v._key"; + cursor = db.query(queryString, String.class); + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("B", "A"); + + queryString = "FOR v IN 1..3 ANY 'circles/E' GRAPH 'traversalGraph' return v._key"; + cursor = db.query(queryString, String.class); + + result = cursor.asListRemaining(); + assertThat(result).containsExactlyInAnyOrder("F", "B", "C", "D", "A", "G"); + } + +} diff --git a/test-non-functional/src/test/java/example/graph/ShortestPathInAQLExampleTest.java b/test-non-functional/src/test/java/example/graph/ShortestPathInAQLExampleTest.java new file mode 100644 index 000000000..f5ecf8ef1 --- /dev/null +++ 
b/test-non-functional/src/test/java/example/graph/ShortestPathInAQLExampleTest.java @@ -0,0 +1,105 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package example.graph; + +import com.arangodb.ArangoCursor; +import com.arangodb.ArangoDBException; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Shortest Path in AQL + * + * @author a-brandt + * @see Shortest Path in AQL + */ +class ShortestPathInAQLExampleTest extends BaseGraphTest { + + @Test + void queryShortestPathFromAToD() throws ArangoDBException { + String queryString = "FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' GRAPH 'traversalGraph' " + + "RETURN {'vertex': v._key, 'edge': e._key}"; + ArangoCursor cursor = db.query(queryString, Pair.class); + final Collection collection = toVertexCollection(cursor); + assertThat(collection.size()).isEqualTo(4); + assertThat(collection).containsExactlyInAnyOrder("A", "B", "C", "D"); + + queryString = "WITH circles FOR v, e IN OUTBOUND SHORTEST_PATH 'circles/A' TO 'circles/D' edges RETURN " + + "{'vertex': v._key, 'edge': e._key}"; + db.query(queryString, Pair.class); + assertThat(collection.size()).isEqualTo(4); + 
assertThat(collection).containsExactlyInAnyOrder("A", "B", "C", "D"); + } + + @Test + void queryShortestPathByFilter() throws ArangoDBException { + String queryString = "FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN" + + " OUTBOUND SHORTEST_PATH a TO d GRAPH 'traversalGraph' RETURN {'vertex':v._key, 'edge':e._key}"; + ArangoCursor cursor = db.query(queryString, Pair.class); + final Collection collection = toVertexCollection(cursor); + assertThat(collection.size()).isEqualTo(4); + assertThat(collection).containsExactlyInAnyOrder("A", "B", "C", "D"); + + queryString = "FOR a IN circles FILTER a._key == 'A' FOR d IN circles FILTER d._key == 'D' FOR v, e IN " + + "OUTBOUND SHORTEST_PATH a TO d edges RETURN {'vertex': v._key, 'edge': e._key}"; + db.query(queryString, Pair.class); + assertThat(collection.size()).isEqualTo(4); + assertThat(collection).containsExactlyInAnyOrder("A", "B", "C", "D"); + } + + private Collection toVertexCollection(final ArangoCursor cursor) { + final List result = new ArrayList<>(); + while (cursor.hasNext()) { + final Pair pair = cursor.next(); + result.add(pair.getVertex()); + } + return result; + } + + public static class Pair { + + private String vertex; + private String edge; + + String getVertex() { + return vertex; + } + + void setVertex(final String vertex) { + this.vertex = vertex; + } + + String getEdge() { + return edge; + } + + void setEdge(final String edge) { + this.edge = edge; + } + + } + +} diff --git a/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java b/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java new file mode 100644 index 000000000..136869308 --- /dev/null +++ b/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java @@ -0,0 +1,201 @@ +package mp; + +import com.arangodb.Compression; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.HostDescription; +import 
com.arangodb.entity.LoadBalancingStrategy; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** + * Implementation of ArangoConfigProperties compatible with MicroProfile Config. + */ +public final class ArangoConfigPropertiesMPImpl implements ArangoConfigProperties { + private Optional> hosts; + private Optional protocol; + private Optional user; + private Optional password; + private Optional jwt; + private Optional timeout; + private Optional useSsl; + private Optional sslCertValue; + private Optional sslAlgorithm; + private Optional sslProtocol; + private Optional verifyHost; + private Optional chunkSize; + private Optional pipelining; + private Optional maxConnections; + private Optional connectionTtl; + private Optional keepAliveInterval; + private Optional acquireHostList; + private Optional acquireHostListInterval; + private Optional loadBalancingStrategy; + private Optional responseQueueTimeSamples; + private Optional compression; + private Optional compressionThreshold; + private Optional compressionLevel; + private Optional serdeProviderClass; + + @Override + public Optional> getHosts() { + return hosts; + } + + @Override + public Optional getProtocol() { + return protocol; + } + + @Override + public Optional getUser() { + return user; + } + + @Override + public Optional getPassword() { + return password; + } + + @Override + public Optional getJwt() { + return jwt; + } + + @Override + public Optional getTimeout() { + return timeout; + } + + @Override + public Optional getUseSsl() { + return useSsl; + } + + @Override + public Optional getSslCertValue() { + return sslCertValue; + } + + @Override + public Optional getSslAlgorithm() { + return sslAlgorithm; + } + + @Override + public Optional getSslProtocol() { + return sslProtocol; + } + + @Override + public Optional getVerifyHost() { + return verifyHost; + } + + @Override + public Optional getChunkSize() { + return chunkSize; + } + + @Override + public Optional 
getPipelining() { + return pipelining; + } + + @Override + public Optional getMaxConnections() { + return maxConnections; + } + + @Override + public Optional getConnectionTtl() { + return connectionTtl; + } + + @Override + public Optional getKeepAliveInterval() { + return keepAliveInterval; + } + + @Override + public Optional getAcquireHostList() { + return acquireHostList; + } + + @Override + public Optional getAcquireHostListInterval() { + return acquireHostListInterval; + } + + @Override + public Optional getLoadBalancingStrategy() { + return loadBalancingStrategy; + } + + @Override + public Optional getResponseQueueTimeSamples() { + return responseQueueTimeSamples; + } + + @Override + public Optional getCompression() { + return compression; + } + + @Override + public Optional getCompressionThreshold() { + return compressionThreshold; + } + + @Override + public Optional getCompressionLevel() { + return compressionLevel; + } + + @Override + public Optional getSerdeProviderClass() { + return serdeProviderClass; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ArangoConfigPropertiesMPImpl that = (ArangoConfigPropertiesMPImpl) o; + return Objects.equals(hosts, that.hosts) && Objects.equals(protocol, that.protocol) && Objects.equals(user, that.user) && Objects.equals(password, that.password) && Objects.equals(jwt, that.jwt) && Objects.equals(timeout, that.timeout) && Objects.equals(useSsl, that.useSsl) && Objects.equals(verifyHost, that.verifyHost) && Objects.equals(chunkSize, that.chunkSize) && Objects.equals(pipelining, that.pipelining) && Objects.equals(maxConnections, that.maxConnections) && Objects.equals(connectionTtl, that.connectionTtl) && Objects.equals(keepAliveInterval, that.keepAliveInterval) && Objects.equals(acquireHostList, that.acquireHostList) && Objects.equals(acquireHostListInterval, that.acquireHostListInterval) && 
Objects.equals(loadBalancingStrategy, that.loadBalancingStrategy) && Objects.equals(responseQueueTimeSamples, that.responseQueueTimeSamples) && Objects.equals(compression, that.compression) && Objects.equals(compressionThreshold, that.compressionThreshold) && Objects.equals(compressionLevel, that.compressionLevel) && Objects.equals(serdeProviderClass, that.serdeProviderClass); + } + + @Override + public int hashCode() { + return Objects.hash(hosts, protocol, user, password, jwt, timeout, useSsl, verifyHost, chunkSize, pipelining, maxConnections, connectionTtl, keepAliveInterval, acquireHostList, acquireHostListInterval, loadBalancingStrategy, responseQueueTimeSamples, compression, compressionThreshold, compressionLevel, serdeProviderClass); + } + + @Override + public String toString() { + return "ArangoConfigPropertiesMPImpl{" + + "hosts=" + hosts + + ", protocol=" + protocol + + ", user=" + user + + ", password=" + password + + ", jwt=" + jwt + + ", timeout=" + timeout + + ", useSsl=" + useSsl + + ", verifyHost=" + verifyHost + + ", chunkSize=" + chunkSize + + ", pipelining=" + pipelining + + ", maxConnections=" + maxConnections + + ", connectionTtl=" + connectionTtl + + ", keepAliveInterval=" + keepAliveInterval + + ", acquireHostList=" + acquireHostList + + ", acquireHostListInterval=" + acquireHostListInterval + + ", loadBalancingStrategy=" + loadBalancingStrategy + + ", responseQueueTimeSamples=" + responseQueueTimeSamples + + ", compression=" + compression + + ", compressionThreshold=" + compressionThreshold + + ", compressionLevel=" + compressionLevel + + ", serdeProviderClass=" + serdeProviderClass + + '}'; + } +} diff --git a/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java b/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java new file mode 100644 index 000000000..ad98ea4e9 --- /dev/null +++ b/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java @@ -0,0 +1,44 @@ +package mp; + +import 
com.arangodb.config.ArangoConfigProperties; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledInNativeImage; + +import static org.assertj.core.api.Assertions.assertThat; + +@DisabledInNativeImage +class ConfigMPDefaultsTest { + + @Test + void defaultValues() { + ArangoConfigProperties config = ConfigUtilsMP.loadConfigMP("arangodb.properties", "wrong"); + checkResult(config); + } + + private void checkResult(ArangoConfigProperties config) { + assertThat(config.getHosts()).isEmpty(); + assertThat(config.getProtocol()).isEmpty(); + assertThat(config.getUser()).isEmpty(); + assertThat(config.getPassword()).isNotPresent(); + assertThat(config.getJwt()).isNotPresent(); + assertThat(config.getTimeout()).isEmpty(); + assertThat(config.getUseSsl()).isEmpty(); + assertThat(config.getSslCertValue()).isEmpty(); + assertThat(config.getSslAlgorithm()).isEmpty(); + assertThat(config.getSslProtocol()).isEmpty(); + assertThat(config.getVerifyHost()).isEmpty(); + assertThat(config.getChunkSize()).isEmpty(); + assertThat(config.getPipelining()).isEmpty(); + assertThat(config.getMaxConnections()).isNotPresent(); + assertThat(config.getConnectionTtl()).isNotPresent(); + assertThat(config.getKeepAliveInterval()).isNotPresent(); + assertThat(config.getAcquireHostList()).isEmpty(); + assertThat(config.getAcquireHostListInterval()).isEmpty(); + assertThat(config.getLoadBalancingStrategy()).isEmpty(); + assertThat(config.getResponseQueueTimeSamples()).isEmpty(); + assertThat(config.getCompression()).isEmpty(); + assertThat(config.getCompressionThreshold()).isNotPresent(); + assertThat(config.getCompressionLevel()).isNotPresent(); + } + +} diff --git a/test-non-functional/src/test/java/mp/ConfigMPTest.java b/test-non-functional/src/test/java/mp/ConfigMPTest.java new file mode 100644 index 000000000..38a556be0 --- /dev/null +++ b/test-non-functional/src/test/java/mp/ConfigMPTest.java @@ -0,0 +1,87 @@ +package mp; + +import com.arangodb.Compression; +import 
com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.config.HostDescription; +import com.arangodb.entity.LoadBalancingStrategy; +import org.assertj.core.api.InstanceOfAssertFactories; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledInNativeImage; + +import static org.assertj.core.api.Assertions.assertThat; + +@DisabledInNativeImage +class ConfigMPTest { + private final HostDescription hostA = new HostDescription("aaa", 1111); + private final HostDescription hostB = new HostDescription("bbb", 2222); + private final Protocol protocol = Protocol.HTTP_VPACK; + private final String user = "testUser"; + private final String password = "testPassword"; + private final String jwt = "testJwt"; + private final Integer timeout = 9876; + private final Boolean useSsl = true; + private final String sslCertValue = "sslCertValue"; + private final String sslAlgorithm = "sslAlgorithm"; + private final String sslProtocol = "sslProtocol"; + private final Boolean verifyHost = false; + private final Integer vstChunkSize = 1234; + private final Boolean pipelining = true; + private final Integer maxConnections = 123; + private final Long connectionTtl = 12345L; + private final Integer keepAliveInterval = 123456; + private final Boolean acquireHostList = true; + private final Integer acquireHostListInterval = 1234567; + private final LoadBalancingStrategy loadBalancingStrategy = LoadBalancingStrategy.ROUND_ROBIN; + private final Integer responseQueueTimeSamples = 12345678; + private final Compression compression = Compression.GZIP; + private final Integer compressionThreshold = 123456789; + private final Integer compressionLevel = 9; + private final String serdeProviderClass = "com.arangodb.serde.jsonb.JsonbSerdeProvider"; + + @Test + void readConfig() { + ArangoConfigProperties config = ConfigUtilsMP.loadConfigMP("arangodb-config-test.properties", "adb"); + checkResult(config); + } + + private void 
checkResult(ArangoConfigProperties config) { + assertThat(config.getHosts()) + .isPresent() + .get(InstanceOfAssertFactories.LIST) + .containsExactly(hostA, hostB); + assertThat(config.getProtocol()).hasValue(protocol); + assertThat(config.getUser()).hasValue(user); + assertThat(config.getPassword()) + .isPresent() + .hasValue(password); + assertThat(config.getJwt()) + .isPresent() + .hasValue(jwt); + assertThat(config.getTimeout()).hasValue(timeout); + assertThat(config.getUseSsl()).hasValue(useSsl); + assertThat(config.getSslCertValue()).hasValue(sslCertValue); + assertThat(config.getSslAlgorithm()).hasValue(sslAlgorithm); + assertThat(config.getSslProtocol()).hasValue(sslProtocol); + assertThat(config.getVerifyHost()).hasValue(verifyHost); + assertThat(config.getChunkSize()).hasValue(vstChunkSize); + assertThat(config.getPipelining()).hasValue(pipelining); + assertThat(config.getMaxConnections()) + .isPresent() + .hasValue(maxConnections); + assertThat(config.getConnectionTtl()) + .isPresent() + .hasValue(connectionTtl); + assertThat(config.getKeepAliveInterval()) + .isPresent() + .hasValue(keepAliveInterval); + assertThat(config.getAcquireHostList()).hasValue(acquireHostList); + assertThat(config.getAcquireHostListInterval()).hasValue(acquireHostListInterval); + assertThat(config.getLoadBalancingStrategy()).hasValue(loadBalancingStrategy); + assertThat(config.getResponseQueueTimeSamples()).hasValue(responseQueueTimeSamples); + assertThat(config.getCompression()).hasValue(compression); + assertThat(config.getCompressionThreshold()).hasValue(compressionThreshold); + assertThat(config.getCompressionLevel()).hasValue(compressionLevel); + assertThat(config.getSerdeProviderClass()).isPresent().hasValue(serdeProviderClass); + } +} diff --git a/test-non-functional/src/test/java/mp/ConfigUtilsMP.java b/test-non-functional/src/test/java/mp/ConfigUtilsMP.java new file mode 100644 index 000000000..07277115f --- /dev/null +++ 
b/test-non-functional/src/test/java/mp/ConfigUtilsMP.java @@ -0,0 +1,26 @@ +package mp; + +import com.arangodb.config.ArangoConfigProperties; +import io.smallrye.config.PropertiesConfigSourceLoader; +import io.smallrye.config.SmallRyeConfig; +import io.smallrye.config.SmallRyeConfigBuilder; + +public class ConfigUtilsMP { + + public static ArangoConfigProperties loadConfigMP() { + return loadConfigMP("arangodb.properties"); + } + + public static ArangoConfigProperties loadConfigMP(final String location) { + return loadConfigMP(location, "arangodb"); + } + + public static ArangoConfigProperties loadConfigMP(final String location, final String prefix) { + SmallRyeConfig cfg = new SmallRyeConfigBuilder() + .withSources(PropertiesConfigSourceLoader.inClassPath(location, 0, ConfigUtilsMP.class.getClassLoader())) + .withMapping(ArangoConfigPropertiesMPImpl.class, prefix) + .build(); + return cfg.getConfigMapping(ArangoConfigPropertiesMPImpl.class, prefix); + } + +} diff --git a/test-non-functional/src/test/java/perf/Benchmark.java b/test-non-functional/src/test/java/perf/Benchmark.java new file mode 100644 index 000000000..8fade9e17 --- /dev/null +++ b/test-non-functional/src/test/java/perf/Benchmark.java @@ -0,0 +1,123 @@ +package perf; + +import java.util.Date; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +public abstract class Benchmark { + + private static final int SYNC_THREADS = 128; + private final CountDownLatch completed = new CountDownLatch(1); + private volatile Long startTime = null; + private volatile Long endTime = null; + private volatile int targetCount = Integer.MAX_VALUE; + private final AtomicInteger counter = new AtomicInteger(); + private final ExecutorService es = Executors.newFixedThreadPool(SYNC_THREADS); + private final int warmupDurationSeconds; + private final int numberOfRequests; + + public Benchmark(int 
warmupDurationSeconds, int numberOfRequests) { + this.warmupDurationSeconds = warmupDurationSeconds; + this.numberOfRequests = numberOfRequests; + } + + public void run() { + // warmup + startBenchmark(); + + // start monitor / warmup + startMonitor(); + + // start benchmark + startMeasuring(); + } + + private void startMonitor() { + for (int i = 0; i < warmupDurationSeconds; i++) { + counter.set(0); + long start = new Date().getTime(); + try { + Thread.sleep(1_000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + long current = new Date().getTime(); + long elapsed = current - start; + double reqsPerSec = 1_000.0 * counter.get() / elapsed; + System.out.println("reqs/s: \t" + reqsPerSec); + } + } + + private void startBenchmark() { + start(); + new Thread(() -> { + try { + completed.await(); + // wait graceful shutdown + Thread.sleep(1_000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + // force shutdown + es.shutdown(); + shutdown(); + }).start(); + } + + private void startMeasuring() { + counter.set(0); + targetCount = numberOfRequests; + startTime = System.currentTimeMillis(); + } + + public long waitComplete() { + try { + completed.await(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + return endTime - startTime; + } + + /** + * @return req/s + */ + public long getThroughput() { + return targetCount * 1000L / (endTime - startTime); + } + + /** + * notify the success of #count requests + * + * @return whether more requests should be performed + */ + private boolean success() { + if (endTime != null) return false; + if (counter.addAndGet(1) >= targetCount) { + endTime = System.currentTimeMillis(); + completed.countDown(); + return false; + } + return true; + } + + private void start() { + for (int i = 0; i < SYNC_THREADS; i++) { + es.execute(() -> { + boolean more = true; + while (more) { + sendRequest(); + more = success(); + } + }); + } + } + + protected abstract void sendRequest(); + + protected 
abstract void shutdown(); + +} diff --git a/test-non-functional/src/test/java/perf/SimpleAsyncPerfTest.java b/test-non-functional/src/test/java/perf/SimpleAsyncPerfTest.java new file mode 100644 index 000000000..16cf43cfc --- /dev/null +++ b/test-non-functional/src/test/java/perf/SimpleAsyncPerfTest.java @@ -0,0 +1,79 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package perf; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.Protocol; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Date; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author Michele Rastelli + */ +@Disabled +class SimpleAsyncPerfTest { + private static final int TARGET = 500_000; + private static final int MAX_PENDING_REQUESTS = 500; + + private void doGetVersion(ArangoDBAsync arangoDB) throws InterruptedException { + AtomicInteger pendingReqsCount = new AtomicInteger(); + AtomicInteger completed = new AtomicInteger(); + + while (completed.get() < TARGET) { + pendingReqsCount.incrementAndGet(); + arangoDB.getVersion() + .thenAccept(it -> { + pendingReqsCount.decrementAndGet(); + completed.incrementAndGet(); + }); + while (pendingReqsCount.get() > MAX_PENDING_REQUESTS) { + 
Thread.sleep(5); + } + } + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void getVersion(Protocol protocol) throws InterruptedException { + ArangoDBAsync arangoDB = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(protocol) + .build() + .async(); + // warmup + doGetVersion(arangoDB); + + long start = new Date().getTime(); + doGetVersion(arangoDB); + long end = new Date().getTime(); + long elapsedMs = end - start; + System.out.println("elapsed ms: " + elapsedMs); + long reqPerSec = TARGET * 1_000 / elapsedMs; + System.out.println("req/s: " + reqPerSec); + System.out.println("---"); + } +} diff --git a/test-non-functional/src/test/java/perf/SimpleSyncPerfTest.java b/test-non-functional/src/test/java/perf/SimpleSyncPerfTest.java new file mode 100644 index 000000000..4e18b71f4 --- /dev/null +++ b/test-non-functional/src/test/java/perf/SimpleSyncPerfTest.java @@ -0,0 +1,61 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package perf; + +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Date; + +/** + * @author Michele Rastelli + */ +@Disabled +class SimpleSyncPerfTest { + private static final int REPETITIONS = 50_000; + + private void doGetVersion(ArangoDB arangoDB) { + for (int i = 0; i < REPETITIONS; i++) { + arangoDB.getVersion(); + } + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void getVersion(Protocol protocol) throws InterruptedException { + ArangoDB arangoDB = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(protocol) + .build(); + // warmup + doGetVersion(arangoDB); + + long start = new Date().getTime(); + doGetVersion(arangoDB); + long end = new Date().getTime(); + System.out.println("elapsed ms: " + (end - start)); + Thread.sleep(5000); + } +} diff --git a/test-non-functional/src/test/java/perf/SyncBenchmarkTest.java b/test-non-functional/src/test/java/perf/SyncBenchmarkTest.java new file mode 100644 index 000000000..cc7651c9b --- /dev/null +++ b/test-non-functional/src/test/java/perf/SyncBenchmarkTest.java @@ -0,0 +1,81 @@ +package perf; + +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.Request; +import com.arangodb.internal.ArangoRequestParam; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +@Disabled +public class SyncBenchmarkTest { + private final int warmupDurationSeconds = 15; + private final int numberOfRequests = 1_000_000; + + @ParameterizedTest + @EnumSource(Protocol.class) + void getVersion(Protocol protocol) { + System.out.println("-----------------------------------------"); + System.out.println("--- getVersion(): " + protocol); + 
System.out.println("-----------------------------------------"); + + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(protocol) + .maxConnections(16) + .build(); + Benchmark benchmark = new Benchmark(warmupDurationSeconds, numberOfRequests) { + @Override + protected void sendRequest() { + adb.getVersion(); + } + + @Override + protected void shutdown() { + adb.shutdown(); + } + }; + benchmark.run(); + System.out.println("elapsed time [ms]: \t" + benchmark.waitComplete()); + System.out.println("throughput [req/s]: \t" + benchmark.getThroughput()); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void getVersionWithDetails(Protocol protocol) { + System.out.println("-----------------------------------------"); + System.out.println("--- getVersion w/ details: " + protocol); + System.out.println("-----------------------------------------"); + + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(protocol) + .maxConnections(16) + .build(); + Benchmark benchmark = new Benchmark(warmupDurationSeconds, numberOfRequests) { + private final Request request = Request.builder() + .db(ArangoRequestParam.SYSTEM) + .method(Request.Method.GET) + .path("/_api/version") + .queryParam("details", "true") + .build(); + + @Override + protected void sendRequest() { + adb.execute(request, Void.class); + } + + @Override + protected void shutdown() { + adb.shutdown(); + } + }; + benchmark.run(); + System.out.println("elapsed time [ms]: \t" + benchmark.waitComplete()); + System.out.println("throughput [req/s]: \t" + benchmark.getThroughput()); + } + +} diff --git a/test-non-functional/src/test/java/serde/InternalSerdePerson.java b/test-non-functional/src/test/java/serde/InternalSerdePerson.java new file mode 100644 index 000000000..9e2f3238a --- /dev/null +++ b/test-non-functional/src/test/java/serde/InternalSerdePerson.java @@ -0,0 +1,12 @@ +package serde; + + +import 
com.arangodb.serde.InternalKey; + +public record InternalSerdePerson( + @InternalKey + String key, + String name, + int age +) { +} diff --git a/test-non-functional/src/test/java/serde/InternalSerdeTest.java b/test-non-functional/src/test/java/serde/InternalSerdeTest.java new file mode 100644 index 000000000..e6d2d5ac6 --- /dev/null +++ b/test-non-functional/src/test/java/serde/InternalSerdeTest.java @@ -0,0 +1,81 @@ +package serde; + +import com.arangodb.ArangoDB; +import com.arangodb.ContentType; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.arangodb.util.RawJson; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; +import java.util.Map; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; + +class InternalSerdeTest { + + static Stream adbByContentType() { + return Stream.of(ContentType.values()) + .map(ct -> new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(ContentType.VPACK.equals(ct) ? 
Protocol.HTTP2_VPACK : Protocol.HTTP2_JSON) + .serde(new InternalSerdeProvider(ct).create()) + .build()) + .map(Arguments::of); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void shadedJsonNode(ArangoDB adb) { + // uses the internal serde + JsonNode doc = JsonNodeFactory.instance + .objectNode() + .put("foo", "bar"); + JsonNode res = adb.db().query("return @d", JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.get("foo").asText()).isEqualTo("bar"); + JsonNode value = adb.db().query("return @d.foo", JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.textValue()).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void map(ArangoDB adb) { + Map doc = Collections.singletonMap("foo", "bar"); + Map res = adb.db().query("return @d", Map.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).hasSize(1); + assertThat(res.get("foo")).isEqualTo("bar"); + String value = adb.db().query("return @d.foo", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(value).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void rawJson(ArangoDB adb) { + RawJson doc = RawJson.of(""" + {"foo":"bar"}"""); + RawJson res = adb.db().query("return @d", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.get()).isEqualTo(doc.get()); + RawJson value = adb.db().query("return @d.foo", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.get()).isEqualTo("\"bar\""); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void person(ArangoDB adb) { + InternalSerdePerson doc = new InternalSerdePerson("key", "Jim", 22); + InternalSerdePerson res = adb.db().query("return @d", InternalSerdePerson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).isEqualTo(doc); + String key = adb.db().query("return @d._key", String.class, 
Collections.singletonMap("d", doc)).next(); + assertThat(key).isEqualTo("key"); + String name = adb.db().query("return @d.name", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(name).isEqualTo("Jim"); + } + +} diff --git a/test-non-functional/src/test/java/serde/JacksonPerson.java b/test-non-functional/src/test/java/serde/JacksonPerson.java new file mode 100644 index 000000000..e9589403d --- /dev/null +++ b/test-non-functional/src/test/java/serde/JacksonPerson.java @@ -0,0 +1,13 @@ +package serde; + +import com.arangodb.serde.jackson.Key; +import com.fasterxml.jackson.annotation.JsonProperty; + +public record JacksonPerson( + @Key + String key, + @JsonProperty("firstName") + String name, + int age +) { +} diff --git a/test-non-functional/src/test/java/serde/JacksonSerdeTest.java b/test-non-functional/src/test/java/serde/JacksonSerdeTest.java new file mode 100644 index 000000000..1a1a5db27 --- /dev/null +++ b/test-non-functional/src/test/java/serde/JacksonSerdeTest.java @@ -0,0 +1,95 @@ +package serde; + +import com.arangodb.ArangoDB; +import com.arangodb.ContentType; +import com.arangodb.Protocol; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.serde.jackson.JacksonSerde; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.arangodb.util.RawJson; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; +import java.util.Map; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; + +class JacksonSerdeTest { + + static Stream adbByContentType() { + return Stream.of(ContentType.values()) + .map(ct -> new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(ContentType.VPACK.equals(ct) ? 
Protocol.HTTP2_VPACK : Protocol.HTTP2_JSON) + .serde(JacksonSerde.of(ct)) + .build()) + .map(Arguments::of); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void shadedJsonNode(ArangoDB adb) { + // uses the internal serde + JsonNode doc = JsonNodeFactory.instance + .objectNode() + .put("foo", "bar"); + JsonNode res = adb.db().query("return @d", JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.get("foo").asText()).isEqualTo("bar"); + JsonNode value = adb.db().query("return @d.foo", JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.textValue()).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void jsonNode(ArangoDB adb) { + // uses the user serde + com.fasterxml.jackson.databind.JsonNode doc = com.fasterxml.jackson.databind.node.JsonNodeFactory.instance + .objectNode() + .put("foo", "bar"); + com.fasterxml.jackson.databind.JsonNode res = adb.db().query("return @d", com.fasterxml.jackson.databind.JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.get("foo").asText()).isEqualTo("bar"); + com.fasterxml.jackson.databind.JsonNode value = adb.db().query("return @d.foo", com.fasterxml.jackson.databind.JsonNode.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.textValue()).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void map(ArangoDB adb) { + Map doc = Collections.singletonMap("foo", "bar"); + Map res = adb.db().query("return @d", Map.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).hasSize(1); + assertThat(res.get("foo")).isEqualTo("bar"); + String value = adb.db().query("return @d.foo", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(value).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void rawJson(ArangoDB adb) { + RawJson doc 
= RawJson.of(""" + {"foo":"bar"}"""); + RawJson res = adb.db().query("return @d", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.get()).isEqualTo(doc.get()); + RawJson value = adb.db().query("return @d.foo", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.get()).isEqualTo("\"bar\""); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void person(ArangoDB adb) { + JacksonPerson doc = new JacksonPerson("key", "Jim", 22); + JacksonPerson res = adb.db().query("return @d", JacksonPerson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).isEqualTo(doc); + String key = adb.db().query("return @d._key", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(key).isEqualTo("key"); + String name = adb.db().query("return @d.firstName", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(name).isEqualTo("Jim"); + } + +} diff --git a/test-non-functional/src/test/java/serde/JsonBPerson.java b/test-non-functional/src/test/java/serde/JsonBPerson.java new file mode 100644 index 000000000..52598c229 --- /dev/null +++ b/test-non-functional/src/test/java/serde/JsonBPerson.java @@ -0,0 +1,61 @@ +package serde; + + +import com.arangodb.serde.jsonb.Key; +import jakarta.json.bind.annotation.JsonbProperty; + +import java.util.Objects; + +public class JsonBPerson { + @Key + private String key; + @JsonbProperty("firstName") + private String name; + private int age; + + public JsonBPerson() { + } + + public JsonBPerson(String key, String name, int age) { + this.key = key; + this.name = name; + this.age = age; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int getAge() { + return age; + } + + public void setAge(int age) { + this.age = age; + } + + @Override + public boolean equals(Object 
o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JsonBPerson person = (JsonBPerson) o; + return age == person.age && Objects.equals(key, person.key) && Objects.equals(name, person.name); + } + + @Override + public int hashCode() { + return Objects.hash(key, name, age); + } +} \ No newline at end of file diff --git a/test-non-functional/src/test/java/serde/JsonBSerdeTest.java b/test-non-functional/src/test/java/serde/JsonBSerdeTest.java new file mode 100644 index 000000000..7fdc03f7c --- /dev/null +++ b/test-non-functional/src/test/java/serde/JsonBSerdeTest.java @@ -0,0 +1,78 @@ +package serde; + +import com.arangodb.ArangoDB; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.serde.jsonb.JsonbSerdeProvider; +import com.arangodb.util.RawJson; +import jakarta.json.*; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; +import java.util.Map; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; + +class JsonBSerdeTest { + + static Stream adbByContentType() { + return Stream.of(new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .serde(new JsonbSerdeProvider().create()) + .build()) + .map(Arguments::of); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void jsonNode(ArangoDB adb) { + JsonObject doc = Json.createObjectBuilder() + .add("foo", "bar") + .build(); + JsonObject res = adb.db().query("return @d", JsonObject.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.getString("foo")).isEqualTo("bar"); + JsonValue value = adb.db().query("return @d.foo", JsonValue.class, Collections.singletonMap("d", doc)).next(); + assertThat(value) + .isInstanceOf(JsonString.class) + .extracting(v -> ((JsonString) v).getString()) + 
.isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void map(ArangoDB adb) { + Map doc = Collections.singletonMap("foo", "bar"); + Map res = adb.db().query("return @d", Map.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).hasSize(1); + assertThat(res.get("foo")).isEqualTo("bar"); + String value = adb.db().query("return @d.foo", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(value).isEqualTo("bar"); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void rawJson(ArangoDB adb) { + RawJson doc = RawJson.of(""" + {"foo":"bar"}"""); + RawJson res = adb.db().query("return @d", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.get()).isEqualTo(doc.get()); + RawJson value = adb.db().query("return @d.foo", RawJson.class, Collections.singletonMap("d", doc)).next(); + assertThat(value.get()).isEqualTo("\"bar\""); + } + + @ParameterizedTest + @MethodSource("adbByContentType") + void person(ArangoDB adb) { + JsonBPerson doc = new JsonBPerson("key", "Jim", 22); + JsonBPerson res = adb.db().query("return @d", JsonBPerson.class, Collections.singletonMap("d", doc)).next(); + assertThat(res).isEqualTo(doc); + String key = adb.db().query("return @d._key", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(key).isEqualTo("key"); + String name = adb.db().query("return @d.firstName", String.class, Collections.singletonMap("d", doc)).next(); + assertThat(name).isEqualTo("Jim"); + } + +} diff --git a/test-non-functional/src/test/java/serde/SerdeConfigurationTest.java b/test-non-functional/src/test/java/serde/SerdeConfigurationTest.java new file mode 100644 index 000000000..adb05f1c0 --- /dev/null +++ b/test-non-functional/src/test/java/serde/SerdeConfigurationTest.java @@ -0,0 +1,81 @@ +package serde; + +import com.arangodb.ArangoDB; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.serde.ArangoSerde; +import 
com.arangodb.serde.jackson.internal.JacksonSerdeImpl; +import com.arangodb.serde.jackson.json.JacksonJsonSerdeProvider; +import com.arangodb.serde.jackson.vpack.JacksonVPackSerdeProvider; +import com.arangodb.serde.jsonb.JsonbSerde; +import com.arangodb.serde.jsonb.JsonbSerdeProvider; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.Test; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SerdeConfigurationTest { + private final VarHandle JACKSON_SERDE_IMPL_MAPPER; + { + try { + JACKSON_SERDE_IMPL_MAPPER = MethodHandles + .privateLookupIn(JacksonSerdeImpl.class, MethodHandles.lookup()) + .findVarHandle(JacksonSerdeImpl.class, "mapper", ObjectMapper.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + @Test + void vpackSerdeProvider() { + ArangoDB adb = new ArangoDB.Builder() + .host("foo", 1111) + .serdeProviderClass(JacksonVPackSerdeProvider.class) + .build(); + + ArangoSerde serde = adb.getSerde().getUserSerde(); + assertThat(serde).isInstanceOf(JacksonSerdeImpl.class); + + ObjectMapper mapper = (ObjectMapper) JACKSON_SERDE_IMPL_MAPPER.get(serde); + assertThat(mapper.getFactory().getFormatName()).isEqualTo("Velocypack"); + } + + @Test + void jsonSerdeProvider() { + ArangoDB adb = new ArangoDB.Builder() + .host("foo", 1111) + .serdeProviderClass(JacksonJsonSerdeProvider.class) + .build(); + + ArangoSerde serde = adb.getSerde().getUserSerde(); + assertThat(serde).isInstanceOf(JacksonSerdeImpl.class); + + ObjectMapper mapper = (ObjectMapper) JACKSON_SERDE_IMPL_MAPPER.get(serde); + assertThat(mapper.getFactory().getFormatName()).isEqualTo("JSON"); + } + + + @Test + void jsonBSerdeProvider() { + ArangoDB adb = new ArangoDB.Builder() + .host("foo", 1111) + .serdeProviderClass(JsonbSerdeProvider.class) + .build(); + + ArangoSerde serde = adb.getSerde().getUserSerde(); + 
assertThat(serde).isInstanceOf(JsonbSerde.class); + } + + @Test + void jsonBSerdeProviderFromConfigFile() { + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile("arangodb-serde-provider.properties")) + .build(); + + ArangoSerde serde = adb.getSerde().getUserSerde(); + assertThat(serde).isInstanceOf(JsonbSerde.class); + } + +} diff --git a/test-non-functional/src/test/java/unicode/UnicodeUtilsTest.java b/test-non-functional/src/test/java/unicode/UnicodeUtilsTest.java new file mode 100644 index 000000000..2cb57a038 --- /dev/null +++ b/test-non-functional/src/test/java/unicode/UnicodeUtilsTest.java @@ -0,0 +1,70 @@ +package unicode; + +import com.arangodb.internal.util.EncodeUtils; +import com.arangodb.util.UnicodeUtils; +import org.graalvm.home.Version; +import org.graalvm.polyglot.Context; +import org.graalvm.polyglot.Value; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import util.TestUtils; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +class UnicodeUtilsTest { + + private static final String encodeFn = "(function encode(x){return encodeURIComponent(x);})"; + private static final String normalizeFn = "(function normalize(x){return x.normalize('NFC');})"; + private static Context context; + private static Value jsEncoder; + private static Value jsNormalizer; + + @BeforeAll + static void beforeClass() { + assumeTrue(Version.getCurrent().isRelease(), "This test requires GraalVM"); + context = Context.create(); + jsEncoder = context.eval("js", encodeFn); + jsNormalizer = context.eval("js", normalizeFn); + } + + @AfterAll + static void afterClass() { + if (context != null) + context.close(); + } + + @Test + void normalizeShouldBehaveAsJs() { + for (int i = 0; i < 10_000; i++) { + String value = TestUtils.generateRandomName(true, 100); + String jsNormalized = 
jsNormalizer.execute(value).as(String.class); + String javaNormalized = UnicodeUtils.normalize(value); + assertThat(javaNormalized).isEqualTo(jsNormalized); + } + } + + @Test + void encodeURIComponentShouldBehaveAsJs() { + for (int i = 0; i < 10_000; i++) { + String value = TestUtils.generateRandomName(true, 100); + String jsEncoded = jsEncoder.execute(value).as(String.class); + String driverJavaEncoded = EncodeUtils.encodeURIComponent(value); + assertThat(driverJavaEncoded).isEqualTo(jsEncoded); + } + } + + @Test + void normalize() { + String normalized = UnicodeUtils.normalize("\u006E\u0303\u00f1"); + assertThat(normalized).isEqualTo("\u00f1\u00f1"); + } + + @Test + void isNormalized() { + assertThat(UnicodeUtils.isNormalized("π”Έπ•£π•’π•Ÿπ•˜π• π”»π”Ή")).isTrue(); + assertThat(UnicodeUtils.isNormalized("\u006E\u0303\u00f1")).isFalse(); + } +} \ No newline at end of file diff --git a/test-non-functional/src/test/java/util/TestUtils.java b/test-non-functional/src/test/java/util/TestUtils.java new file mode 100644 index 000000000..9a5dbc3b9 --- /dev/null +++ b/test-non-functional/src/test/java/util/TestUtils.java @@ -0,0 +1,123 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + + +package util; + + +import com.arangodb.ContentType; +import com.arangodb.Protocol; +import com.arangodb.serde.ArangoSerde; +import com.arangodb.serde.jackson.JacksonSerde; +import com.arangodb.util.UnicodeUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.UUID; + +/** + * @author Michele Rastelli + */ +public final class TestUtils { + public static final String TEST_DB = "java_driver_test_db"; + private static final String[] allChars = TestUtils.generateAllInputChars(); + private static final Random r = new Random(); + + private TestUtils() { + } + + public static ArangoSerde createSerde(Protocol protocol) { + return switch (protocol) { + case VST, HTTP_VPACK, HTTP2_VPACK -> JacksonSerde.of(ContentType.VPACK); + case HTTP_JSON, HTTP2_JSON -> JacksonSerde.of(ContentType.JSON); + }; + } + + /** + * Parses {@param version} and checks whether it is greater or equal to <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. + */ + public static boolean isAtLeastVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) >= 0; + } + + /** + * Parses {@param version} and checks whether it is less than <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. 
+ */ + public static boolean isLessThanVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) < 0; + } + + private static int compareVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + String[] parts = version.split("-")[0].split("\\."); + + int major = Integer.parseInt(parts[0]); + int minor = Integer.parseInt(parts[1]); + int patch = Integer.parseInt(parts[2]); + + int majorComparison = Integer.compare(major, otherMajor); + if (majorComparison != 0) { + return majorComparison; + } + + int minorComparison = Integer.compare(minor, otherMinor); + if (minorComparison != 0) { + return minorComparison; + } + + return Integer.compare(patch, otherPatch); + } + + private static String[] generateAllInputChars() { + List list = new ArrayList<>(); + for (int codePoint = 0; codePoint < Character.MAX_CODE_POINT + 1; codePoint++) { + String s = new String(Character.toChars(codePoint)); + if (codePoint == 47 || // '/' + codePoint == 58 || // ':' + Character.isISOControl(codePoint) || + Character.isLowSurrogate(s.charAt(0)) || + (Character.isHighSurrogate(s.charAt(0)) && s.length() == 1)) { + continue; + } + list.add(s); + } + return list.toArray(new String[0]); + } + + public static String generateRandomName(boolean extendedNames, int length) { + if (extendedNames) { + int max = allChars.length; + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + String allChar = allChars[r.nextInt(max)]; + sb.append(allChar); + } + return UnicodeUtils.normalize(sb.toString()); + } else { + return UUID.randomUUID().toString(); + } + } + +} diff --git a/test-non-functional/src/test/resources/arangodb-config-test.properties b/test-non-functional/src/test/resources/arangodb-config-test.properties new file mode 100644 index 000000000..251b348ae --- /dev/null +++ 
b/test-non-functional/src/test/resources/arangodb-config-test.properties @@ -0,0 +1,24 @@ +adb.hosts=aaa:1111,bbb:2222 +adb.protocol=HTTP_VPACK +adb.user=testUser +adb.password=testPassword +adb.jwt=testJwt +adb.timeout=9876 +adb.useSsl=true +adb.sslCertValue=sslCertValue +adb.sslAlgorithm=sslAlgorithm +adb.sslProtocol=sslProtocol +adb.verifyHost=false +adb.chunkSize=1234 +adb.pipelining=true +adb.maxConnections=123 +adb.connectionTtl=12345 +adb.keepAliveInterval=123456 +adb.acquireHostList=true +adb.acquireHostListInterval=1234567 +adb.loadBalancingStrategy=ROUND_ROBIN +adb.responseQueueTimeSamples=12345678 +adb.compression=GZIP +adb.compressionThreshold=123456789 +adb.compressionLevel=9 +adb.serdeProviderClass=com.arangodb.serde.jsonb.JsonbSerdeProvider diff --git a/test-non-functional/src/test/resources/arangodb-serde-provider.properties b/test-non-functional/src/test/resources/arangodb-serde-provider.properties new file mode 100644 index 000000000..560134c78 --- /dev/null +++ b/test-non-functional/src/test/resources/arangodb-serde-provider.properties @@ -0,0 +1,3 @@ +arangodb.hosts=172.28.0.1:8529 +arangodb.password=test +arangodb.serdeProviderClass=com.arangodb.serde.jsonb.JsonbSerdeProvider diff --git a/test-non-functional/src/test/resources/arangodb-with-prefix.properties b/test-non-functional/src/test/resources/arangodb-with-prefix.properties new file mode 100644 index 000000000..36fb2d0a5 --- /dev/null +++ b/test-non-functional/src/test/resources/arangodb-with-prefix.properties @@ -0,0 +1,3 @@ +adb.hosts=172.28.0.1:8529 +adb.acquireHostList=true +adb.password=test diff --git a/test-non-functional/src/test/resources/arangodb.properties b/test-non-functional/src/test/resources/arangodb.properties new file mode 100644 index 000000000..b9030c227 --- /dev/null +++ b/test-non-functional/src/test/resources/arangodb.properties @@ -0,0 +1,2 @@ +arangodb.hosts=172.28.0.1:8529 +arangodb.password=test diff --git 
a/test-non-functional/src/test/resources/simplelogger.properties b/test-non-functional/src/test/resources/simplelogger.properties new file mode 100644 index 000000000..495a73812 --- /dev/null +++ b/test-non-functional/src/test/resources/simplelogger.properties @@ -0,0 +1,14 @@ +org.slf4j.simpleLogger.logFile=System.out +org.slf4j.simpleLogger.showDateTime=true +org.slf4j.simpleLogger.dateTimeFormat=HH:mm:ss.SSS +org.slf4j.simpleLogger.showThreadName=true +org.slf4j.simpleLogger.showLogName=true +org.slf4j.simpleLogger.showShortLogName=false + + +org.slf4j.simpleLogger.defaultLogLevel=info +#org.slf4j.simpleLogger.log.com.arangodb.internal.serde.JacksonUtils=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.net.Communication=debug +#org.slf4j.simpleLogger.log.io.netty.handler.logging.LoggingHandler=debug +#org.slf4j.simpleLogger.log.io.netty.handler.codec.http2.Http2FrameLogger=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.util.AsyncQueue=trace diff --git a/test-parent/pom.xml b/test-parent/pom.xml new file mode 100644 index 000000000..51da228d6 --- /dev/null +++ b/test-parent/pom.xml @@ -0,0 +1,248 @@ + + + 4.0.0 + + + com.arangodb + arangodb-java-driver-parent + 7.22.0 + + pom + + test-parent + + + false + 2.19.0 + true + 17 + 17 + src/test/java + + + + + + com.arangodb + jackson-serde-vpack + compile + + + org.slf4j + slf4j-simple + test + + + org.junit.platform + junit-platform-launcher + test + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.junit.jupiter + junit-jupiter-params + test + + + org.assertj + assertj-core + test + + + org.awaitility + awaitility + test + + + + + + + com.fasterxml.jackson + jackson-bom + ${adb.jackson.version} + import + pom + + + org.junit + junit-bom + 5.12.2 + pom + import + + + org.eclipse + yasson + 3.0.4 + + + org.slf4j + slf4j-simple + 2.0.17 + + + org.assertj + assertj-core + 3.27.3 + + + org.awaitility + awaitility + 4.3.0 + test + + + 
com.tngtech.archunit + archunit-junit5 + 1.4.1 + + + + + + ${testSourceDirectory} + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + ${shaded} + + + **/*Test.java + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.3 + + + true + ${shaded} + + + **/*Test.java + + + + + + integration-test + verify + + + + + + + + + + default + + + shaded + !true + + + + + ${testSources} + false + + + + com.arangodb + arangodb-java-driver + compile + + + com.arangodb + vst-protocol + compile + + + + + shaded + + + shaded + true + + + + + ${project.build.directory}/generated-test-sources/replacer + true + + + + com.arangodb + arangodb-java-driver-shaded + compile + + + com.arangodb + jackson-serde-json + compile + + + + + + + com.google.code.maven-replacer-plugin + replacer + + + generate-test-sources + + replace + + + + + ${project.basedir}/${testSources} + ** + ${project.build.directory}/generated-test-sources + replacer + + + com.fasterxml + com.arangodb.shaded.fasterxml + + + io.vertx + com.arangodb.shaded.vertx + + + io.netty + com.arangodb.shaded.netty + + + + + + + + + + diff --git a/test-parent/src/test/java/.gitignore b/test-parent/src/test/java/.gitignore new file mode 100644 index 000000000..d6b7ef32c --- /dev/null +++ b/test-parent/src/test/java/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/test-perf/README.md b/test-perf/README.md new file mode 100644 index 000000000..2e35ccd9e --- /dev/null +++ b/test-perf/README.md @@ -0,0 +1,21 @@ +# Serde performance tests + +``` +mvn clean package -am -pl test-perf +java -cp test-perf/target/benchmarks.jar com.arangodb.SerdeBench +``` + +## 19/12/2024 + +- `main f613d3d6` +- `benchmark/base 1e45f8c4` + +``` +Benchmark Mode Cnt Score Score main/base +SerdeBench.deserializeDocsJson avgt 10 0.155 0.149 0.961290322580645 +SerdeBench.deserializeDocsVPack avgt 10 0.209 0.126 0.602870813397129 +SerdeBench.extractBytesJson avgt 10 2.705 0.297 0.109796672828096 +SerdeBench.extractBytesVPack avgt 10 
1.12 0.133 0.11875 +SerdeBench.rawJsonDeser avgt 10 6.016 6.116 1.01662234042553 +SerdeBench.rawJsonSer avgt 10 7.711 7.222 0.936584100635456 +``` diff --git a/test-perf/pom.xml b/test-perf/pom.xml new file mode 100644 index 000000000..2f8b33903 --- /dev/null +++ b/test-perf/pom.xml @@ -0,0 +1,82 @@ + + + 4.0.0 + + ../test-parent + com.arangodb + test-parent + 7.22.0 + + + test-perf + + + 1.37 + benchmarks + + + + + org.slf4j + slf4j-simple + compile + + + org.openjdk.jmh + jmh-core + ${jmh.version} + compile + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + provided + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + ${uberjar.name} + + + org.openjdk.jmh.Main + + + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + + + + + diff --git a/test-perf/src/main/java/com/arangodb/SerdeBench.java b/test-perf/src/main/java/com/arangodb/SerdeBench.java new file mode 100644 index 000000000..3577e96a0 --- /dev/null +++ b/test-perf/src/main/java/com/arangodb/SerdeBench.java @@ -0,0 +1,199 @@ +package com.arangodb; + +import com.arangodb.entity.MultiDocumentEntity; +import com.arangodb.internal.ArangoCollectionImpl; +import com.arangodb.internal.ArangoDatabaseImpl; +import com.arangodb.internal.ArangoExecutor; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.arangodb.jackson.dataformat.velocypack.VPackMapper; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.JsonNode; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import 
org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; +import org.openjdk.jmh.profile.GCProfiler; +import org.openjdk.jmh.results.format.ResultFormatType; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 8, time = 1) +@Measurement(iterations = 10, time = 1) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@Fork(1) +public class SerdeBench { + public static class MyCol extends ArangoCollectionImpl { + static ArangoDB jsonAdb = new ArangoDB.Builder() + .host("127.0.0.1", 8529) + .protocol(Protocol.HTTP_JSON) + .build(); + + static ArangoDB vpackAdb = new ArangoDB.Builder() + .host("127.0.0.1", 8529) + .protocol(Protocol.HTTP_VPACK) + .build(); + + private MyCol(ArangoDB adb) { + super((ArangoDatabaseImpl) adb.db(), "foo"); + } + + public static MyCol ofJson() { + return new MyCol(jsonAdb); + } + + public static MyCol ofVpack() { + return new MyCol(vpackAdb); + } + + @Override + public ArangoExecutor.ResponseDeserializer> getDocumentsResponseDeserializer(Class type) { + return super.getDocumentsResponseDeserializer(type); + } + } + + @State(Scope.Benchmark) + public static class Data { + public final byte[] vpack; + public final byte[] json; + public final RawBytes rawJsonBytes; + public final RawBytes rawVPackBytes; + public final RawJson rawJson; + public final MyCol jsonCol = MyCol.ofJson(); + public final MyCol vpackCol = 
MyCol.ofVpack(); + public final InternalResponse jsonResp = new InternalResponse(); + public final InternalResponse vpackResp = new InternalResponse(); + + public Data() { + ObjectMapper jsonMapper = new ObjectMapper(); + VPackMapper vpackMapper = new VPackMapper(); + + try { + JsonNode jn = readFile("/api-docs.json", jsonMapper); + json = jsonMapper.writeValueAsBytes(jn); + vpack = vpackMapper.writeValueAsBytes(jn); + rawJsonBytes = RawBytes.of(json); + rawVPackBytes = RawBytes.of(vpack); + rawJson = RawJson.of(jsonMapper.writeValueAsString(jsonMapper.readTree(json))); + + JsonNode docs = readFile("/multi-docs.json", jsonMapper); + jsonResp.setResponseCode(200); + jsonResp.setBody(jsonMapper.writeValueAsBytes(docs)); + vpackResp.setResponseCode(200); + vpackResp.setBody(vpackMapper.writeValueAsBytes(docs)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private JsonNode readFile(String filename, ObjectMapper mapper) throws IOException { + InputStream inputStream = SerdeBench.class.getResourceAsStream(filename); + String str = readFromInputStream(inputStream); + return mapper.readTree(str); + } + + private String readFromInputStream(InputStream inputStream) throws IOException { + StringBuilder resultStringBuilder = new StringBuilder(); + try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream))) { + String line; + while ((line = br.readLine()) != null) { + resultStringBuilder.append(line).append("\n"); + } + } + return resultStringBuilder.toString(); + } + } + + public static void main(String[] args) throws RunnerException, IOException { + String datetime = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()); + Path target = Files.createDirectories(Paths.get("target", "jmh-result")); + + ArrayList jvmArgs = new ArrayList<>(); + jvmArgs.add("-Xms256m"); + jvmArgs.add("-Xmx256m"); + if (Integer.parseInt(System.getProperty("java.version").split("\\.")[0]) >= 11) { + 
jvmArgs.add("-XX:StartFlightRecording=filename=" + target.resolve(datetime + ".jfr") + ",settings=profile"); + } + + Options opt = new OptionsBuilder() + .include(SerdeBench.class.getSimpleName()) + .addProfiler(GCProfiler.class) + .jvmArgs(jvmArgs.toArray(new String[0])) + .resultFormat(ResultFormatType.JSON) + .result(target.resolve(datetime + ".json").toString()) + .build(); + + new Runner(opt).run(); + } + + @Benchmark + public void rawJsonDeser(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.deserialize(data.vpack, RawJson.class) + ); + } + + @Benchmark + public void rawJsonSer(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.serialize(data.rawJson) + ); + } + + @Benchmark + public void extractBytesVPack(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.extract(data.vpack, "/definitions/put_api_simple_remove_by_example_opts") + ); + } + + @Benchmark + public void extractBytesJson(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.JSON).create(); + bh.consume( + serde.extract(data.json, "/definitions/put_api_simple_remove_by_example_opts") + ); + } + + @Benchmark + public void deserializeDocsJson(Data data, Blackhole bh) { + bh.consume( + data.jsonCol.getDocumentsResponseDeserializer(RawBytes.class).deserialize(data.jsonResp) + ); + } + + @Benchmark + public void deserializeDocsVPack(Data data, Blackhole bh) { + bh.consume( + data.vpackCol.getDocumentsResponseDeserializer(RawBytes.class).deserialize(data.vpackResp) + ); + } + +} diff --git a/test-perf/src/main/resources/api-docs.json b/test-perf/src/main/resources/api-docs.json new file mode 100644 index 000000000..d23f57331 --- /dev/null +++ b/test-perf/src/main/resources/api-docs.json @@ -0,0 +1,7377 @@ +{ + "basePath": "/", + 
"definitions": { + "JSA_get_api_collection_figures_rc_200": { + "properties": { + "count": { + "description": "The number of documents currently present in the collection.
", + "format": "int64", + "type": "integer" + }, + "figures": { + "$ref": "#/definitions/collection_figures" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "count", + "journalSize" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSA_put_api_simple_any": { + "properties": { + "collection": { + "description": "The identifier or name of the collection to query.
Returns a JSON object with the document stored in the attribute document if the collection contains at least one document. If the collection is empty, the document attrbute contains null.
", + "type": "string" + } + }, + "required": [ + "collection" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "example": { + "description": "The example document.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "example", + "skip", + "limit" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_first": { + "properties": { + "collection": { + "description": "the name of the collection
", + "type": "string" + }, + "count": { + "description": "the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
", + "type": "string" + } + }, + "required": [ + "collection" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_first_example": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "example": { + "description": "The example document.
", + "type": "string" + } + }, + "required": [ + "collection", + "example" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_fulltext": { + "properties": { + "attribute": { + "description": "The attribute that contains the texts.
", + "type": "string" + }, + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "index": { + "description": "The identifier of the fulltext-index to use.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "type": "string" + }, + "query": { + "description": "The fulltext query. Please refer to [Fulltext queries](../SimpleQueries/FulltextQueries.html) for details.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "attribute", + "query", + "skip", + "limit", + "index" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_last": { + "properties": { + "collection": { + "description": " the name of the collection
", + "type": "string" + }, + "count": { + "description": "the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "collection", + "count" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_near": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "distance": { + "description": "If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude": { + "description": "The latitude of the coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude": { + "description": "The longitude of the coordinate.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude", + "longitude", + "distance", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_range": { + "properties": { + "attribute": { + "description": "The attribute path to check.
", + "type": "string" + }, + "closed": { + "description": "If true, use interval including left and right, otherwise exclude right, but include left.
", + "format": "", + "type": "boolean" + }, + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "left": { + "description": "The lower bound.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "format": "int64", + "type": "integer" + }, + "right": { + "description": "The upper bound.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "attribute", + "left", + "right", + "closed", + "skip" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_remove_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to remove from.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "options": { + "$ref": "#/definitions/put_api_simple_remove_by_example_opts" + } + }, + "required": [ + "collection", + "example" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_replace_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to replace within.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "newValue": { + "description": "The replacement document that will get inserted in place of the \"old\" documents.
", + "type": "string" + }, + "options": { + "$ref": "#/definitions/put_api_simple_replace_by_example_options" + } + }, + "required": [ + "collection", + "example", + "newValue" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_update_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to update within.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "newValue": { + "additionalProperties": {}, + "description": "A document containing all the attributes to update in the found documents.
", + "type": "object" + }, + "options": { + "$ref": "#/definitions/put_api_simple_update_by_example_options" + } + }, + "required": [ + "collection", + "example", + "newValue" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_within": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "distance": { + "description": "If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude": { + "description": "The latitude of the coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude": { + "description": "The longitude of the coordinate.
", + "type": "string" + }, + "radius": { + "description": "The maximal radius (in meters).
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude", + "longitude", + "radius", + "distance", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_within_rectangle": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude1": { + "description": "The latitude of the first rectangle coordinate.
", + "type": "string" + }, + "latitude2": { + "description": "The latitude of the second rectangle coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude1": { + "description": "The longitude of the first rectangle coordinate.
", + "type": "string" + }, + "longitude2": { + "description": "The longitude of the second rectangle coordinate.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude1", + "longitude1", + "latitude2", + "longitude2", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSF_HTTP_API_TRAVERSAL": { + "properties": { + "direction": { + "description": "direction for traversal
  • if set, must be either \"outbound\", \"inbound\", or \"any\"
  • if not set, the expander attribute must be specified
", + "type": "string" + }, + "edgeCollection": { + "description": "name of the collection that contains the edges.
", + "type": "string" + }, + "expander": { + "description": "body (JavaScript) code of custom expander function must be set if direction attribute is not set function signature: (config, vertex, path) -> array expander must return an array of the connections for vertex each connection is an object with the attributes edge and vertex
", + "type": "string" + }, + "filter": { + "description": "default is to include all nodes: body (JavaScript code) of custom filter function function signature: (config, vertex, path) -> mixed can return four different string values:
  • \"exclude\" -> this vertex will not be visited.
  • \"prune\" -> the edges of this vertex will not be followed.
  • \"\" or undefined -> visit the vertex and follow it's edges.
  • Array -> containing any combination of the above. If there is at least one \"exclude\" or \"prune\" respectivly is contained, it's effect will occur.
", + "type": "string" + }, + "graphName": { + "description": "name of the graph that contains the edges. Either edgeCollection or graphName has to be given. In case both values are set the graphName is prefered.
", + "type": "string" + }, + "init": { + "description": "body (JavaScript) code of custom result initialization function function signature: (config, result) -> void initialize any values in result with what is required
", + "type": "string" + }, + "itemOrder": { + "description": "item iteration order can be \"forward\" or \"backward\"
", + "type": "string" + }, + "maxDepth": { + "description": "ANDed with any existing filters visits only nodes in at most the given depth
", + "type": "string" + }, + "maxIterations": { + "description": "Maximum number of iterations in each traversal. This number can be set to prevent endless loops in traversal of cyclic graphs. When a traversal performs as many iterations as the maxIterations value, the traversal will abort with an error. If maxIterations is not set, a server-defined value may be used.
", + "type": "string" + }, + "minDepth": { + "description": "ANDed with any existing filters): visits only nodes in at least the given depth
", + "type": "string" + }, + "order": { + "description": "traversal order can be \"preorder\", \"postorder\" or \"preorder-expander\"
", + "type": "string" + }, + "sort": { + "description": "body (JavaScript) code of a custom comparison function for the edges. The signature of this function is (l, r) -> integer (where l and r are edges) and must return -1 if l is smaller than, +1 if l is greater than, and 0 if l and r are equal. The reason for this is the following: The order of edges returned for a certain vertex is undefined. This is because there is no natural order of edges for a vertex with multiple connected edges. To explicitly define the order in which edges on the vertex are followed, you can specify an edge comparator function with this attribute. Note that the value here has to be a string to conform to the JSON standard, which in turn is parsed as function body on the server side. Furthermore note that this attribute is only used for the standard expanders. If you use your custom expander you have to do the sorting yourself within the expander code.
", + "type": "string" + }, + "startVertex": { + "description": "id of the startVertex, e.g. \"users/foo\".
", + "type": "string" + }, + "strategy": { + "description": "traversal strategy can be \"depthfirst\" or \"breadthfirst\"
", + "type": "string" + }, + "uniqueness": { + "description": "specifies uniqueness for vertices and edges visited if set, must be an object like this:
\"uniqueness\": {\"vertices\": \"none\"|\"global\"|\"path\", \"edges\": \"none\"|\"global\"|\"path\"}
", + "type": "string" + }, + "visitor": { + "description": "body (JavaScript) code of custom visitor function function signature: (config, result, vertex, path, connected) -> void The visitor function can do anything, but its return value is ignored. To populate a result, use the result variable by reference. Note that the connected argument is only populated when the order attribute is set to \"preorder-expander\".
", + "type": "string" + } + }, + "required": [ + "startVertex" + ], + "type": "object", + "x-filename": "Graph Traversal - js/actions/api-traversal.js" + }, + "JSF_cluster_dispatcher_POST": { + "properties": { + "action": { + "description": "can be one of the following: - \"launch\": the cluster is launched for the first time, all data directories and log files are cleaned and created - \"shutdown\": the cluster is shut down, the additional property runInfo (see below) must be bound as well - \"relaunch\": the cluster is launched again, all data directories and log files are untouched and need to be there already - \"cleanup\": use this after a shutdown to remove all data in the data directories and all log files, use with caution - \"isHealthy\": checks whether or not the processes involved in the cluster are running or not. The additional property runInfo (see above) must be bound as well - \"upgrade\": performs an upgrade of a cluster, to this end, the agency is started, and then every server is once started with the \"--upgrade\" option, and then normally. Finally, the script \"verion-check.js\" is run on one of the coordinators for the cluster.
", + "type": "string" + }, + "clusterPlan": { + "additionalProperties": {}, + "description": "is a cluster plan (see JSF_cluster_planner_POST),
", + "type": "object" + }, + "myname": { + "description": "is the ID of this dispatcher, this is used to decide which commands are executed locally and which are forwarded to other dispatchers
", + "type": "string" + }, + "runInfo": { + "additionalProperties": {}, + "description": "this is needed for the \"shutdown\" and \"isHealthy\" actions only and should be the structure that \"launch\", \"relaunch\" or \"upgrade\" returned. It contains runtime information like process IDs.
", + "type": "object" + } + }, + "required": [ + "clusterPlan", + "myname", + "action" + ], + "type": "object", + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "JSF_general_graph_create_http_examples": { + "properties": { + "edgeDefinitions": { + "description": "An array of definitions for the edge
", + "type": "string" + }, + "name": { + "description": "Name of the graph.
", + "type": "string" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.
", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_general_graph_edge_definition_add_http_examples": { + "properties": { + "collection": { + "description": "The name of the edge collection to be used.
", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "One or many edge collections that can contain target vertices.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_general_graph_edge_definition_modify_http_examples": { + "properties": { + "collection": { + "description": "The name of the edge collection to be used.
", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "One or many edge collections that can contain target vertices.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_get_api_database_new": { + "properties": { + "active": { + "description": "A Flag indicating whether the user account should be activated or not. The default value is true.
", + "format": "", + "type": "boolean" + }, + "extra": { + "additionalProperties": {}, + "description": "A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
", + "type": "object" + }, + "name": { + "description": "Has to contain a valid database name.
", + "type": "string" + }, + "passwd": { + "description": "The user password as a string. If not specified, it will default to an empty string.
", + "type": "string" + }, + "username": { + "description": "The user name as a string. If users is not specified or does not contain any users, a default user root will be created with an empty string password. This ensures that the new database will be accessible after it is created.
", + "type": "string" + }, + "users": { + "description": "Has to be a list of user objects to initially create for the new database. Each user object can contain the following attributes:
", + "items": { + "$ref": "#/definitions/JSF_get_api_database_new_USERS" + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Database - js/actions/api-database.js" + }, + "JSF_get_api_database_new_USERS": { + "description": "", + "properties": { + "active": { + "description": "if False the user won't be able to log into the database.
", + "type": "boolean" + }, + "passwd": { + "description": "Password for the user
", + "type": "string" + }, + "username": { + "description": "Loginname of the user to be created
", + "type": "string" + } + }, + "type": "object" + }, + "JSF_get_api_return_rc_200": { + "properties": { + "details": { + "additionalProperties": {}, + "description": "an optional JSON object with additional details. This is returned only if the details URL parameter is set to true in the request.
", + "type": "object" + }, + "server": { + "description": "will always contain arango
", + "type": "string" + }, + "version": { + "description": "the server version string. The string has the format \"major.*minor.*sub\". major and minor will be numeric, and sub may contain a number or a textual version.
", + "type": "string" + } + }, + "required": [ + "server", + "version" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_post_api_aqlfunction": { + "properties": { + "code": { + "description": "a string representation of the function body.
", + "type": "string" + }, + "isDeterministic": { + "description": "an optional boolean value to indicate that the function results are fully deterministic (function return value solely depends on the input value and return value is the same for repeated calls with same input). The isDeterministic attribute is currently not used but may be used later for optimisations.
", + "format": "", + "type": "boolean" + }, + "name": { + "description": "the fully qualified name of the user functions.
", + "type": "string" + } + }, + "required": [ + "name", + "code" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "JSF_post_api_collection": { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSF_post_api_collection_opts": { + "description": "additional options for key generation. If specified, then keyOptions should be a JSON array containing the following attributes:
", + "properties": { + "allowUserKeys": { + "description": "if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator will solely be responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
", + "type": "boolean" + }, + "increment": { + "description": "increment value for autoincrement key generator. Not used for other key generator types.
", + "format": "int64", + "type": "integer" + }, + "offset": { + "description": "Initial offset value for autoincrement key generator. Not used for other key generator types.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "specifies the type of the key generator. The currently available generators are traditional and autoincrement.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSF_post_api_cursor": { + "properties": { + "batchSize": { + "description": "maximum number of result documents to be transferred from the server to the client in one roundtrip. If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
", + "format": "int64", + "type": "integer" + }, + "bindVars": { + "description": "list of bind parameter objects.
", + "items": { + "additionalProperties": {}, + "type": "object" + }, + "type": "array" + }, + "cache": { + "description": "flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup will be skipped for the query. If set to true, it will lead to the query cache being checked for the query if the query cache mode is either on or demand.
", + "format": "", + "type": "boolean" + }, + "count": { + "description": "indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result. Calculating the \"count\" attribute might in the future have a performance impact for some queries so this option is turned off by default, and \"count\" is only returned when requested.
", + "format": "", + "type": "boolean" + }, + "options": { + "$ref": "#/definitions/JSF_post_api_cursor_opts" + }, + "query": { + "description": "contains the query string to be executed
", + "type": "string" + }, + "ttl": { + "description": "The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_opts": { + "description": "key/value object with extra options for the query.
", + "properties": { + "fullCount": { + "description": "if set to true and the query contains a LIMIT clause, then the result will contain an extra attribute extra with a sub-attribute fullCount. This sub-attribute will contain the number of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and thus make queries run longer. Note that the fullCount sub-attribute will only be present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the query.
", + "type": "boolean" + }, + "maxPlans": { + "description": "limits the maximum number of plans that are created by the AQL query optimizer.
", + "format": "int64", + "type": "integer" + }, + "optimizer.rules": { + "description": "a list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules.
", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + }, + "profile": { + "description": "if set to true, then the additional query profiling information will be returned in the extra.stats return attribute if the query result is not served from the query cache.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_rc_201": { + "properties": { + "cached": { + "description": "a boolean flag indicating whether the query result was served from the query cache or not. If the query result is served from the query cache, the extra return attribute will not contain any stats sub-attribute and no profile sub-attribute.
", + "format": "", + "type": "boolean" + }, + "code": { + "description": "the HTTP status code
", + "format": "integer", + "type": "integer" + }, + "count": { + "description": "the total number of result documents available (only available if the query was executed with the count attribute set)
", + "format": "int64", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (false in this case)
", + "format": "", + "type": "boolean" + }, + "extra": { + "additionalProperties": {}, + "description": "an optional JSON object with extra information about the query result contained in its stats sub-attribute. For data-modification queries, the extra.stats sub-attribute will contain the number of modified documents and the number of documents that could not be modified due to an error (if ignoreErrors query option is specified)
", + "type": "object" + }, + "hasMore": { + "description": "A boolean indicator whether there are more results available for the cursor on the server
", + "format": "", + "type": "boolean" + }, + "id": { + "description": "id of temporary cursor created on the server (optional, see above)
", + "type": "string" + }, + "result": { + "description": "an array of result documents (might be empty if query has no results)
", + "items": {}, + "type": "array" + } + }, + "required": [ + "error", + "code", + "hasMore", + "id", + "cached" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_rc_400": { + "properties": { + "code": { + "description": "the HTTP status code
", + "format": "int64", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate that an error occurred (true in this case)
", + "format": "", + "type": "boolean" + }, + "errorMessage": { + "description": "a descriptive error message
If the query specification is complete, the server will process the query. If an error occurs during query processing, the server will respond with HTTP 400. Again, the body of the response will contain details about the error.
A list of query errors can be found here (../ArangoErrors/README.md).

", + "type": "string" + }, + "errorNum": { + "description": "the server error number
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_explain": { + "properties": { + "bindVars": { + "description": "key/value pairs representing the bind values
", + "items": { + "additionalProperties": {}, + "type": "object" + }, + "type": "array" + }, + "options": { + "$ref": "#/definitions/explain_options" + }, + "query": { + "description": "the query which you want explained; If the query references any bind variables, these must also be passed in the attribute bindVars. Additional options for the query can be passed in the options attribute.
", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "JSF_post_api_export": { + "properties": { + "batchSize": { + "description": "maximum number of result documents to be transferred from the server to the client in one roundtrip (optional). If this attribute is not set, a server-controlled default value will be used.
", + "format": "int64", + "type": "integer" + }, + "count": { + "description": "boolean flag that indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result (optional). Calculating the \"count\" attribute might in the future have a performance impact so this option is turned off by default, and \"count\" is only returned when requested.
", + "format": "", + "type": "boolean" + }, + "flush": { + "description": "if set to true, a WAL flush operation will be executed prior to the export. The flush operation will start copying documents from the WAL to the collection's datafiles. There will be an additional wait time of up to flushWait seconds after the flush to allow the WAL collector to change the adjusted document meta-data to point into the datafiles, too. The default value is false (i.e. no flush) so most recently inserted or updated documents from the collection might be missing in the export.
", + "format": "", + "type": "boolean" + }, + "flushWait": { + "description": "maximum wait time in seconds after a flush operation. The default value is 10. This option only has an effect when flush is set to true.
", + "format": "int64", + "type": "integer" + }, + "limit": { + "description": "an optional limit value, determining the maximum number of documents to be included in the cursor. Omitting the limit attribute or setting it to 0 will lead to no limit being used. If a limit is used, it is undefined which documents from the collection will be included in the export and which will be excluded. This is because there is no natural order of documents in a collection.
", + "format": "int64", + "type": "integer" + }, + "restrict": { + "$ref": "#/definitions/JSF_post_api_export_restrictions" + }, + "ttl": { + "description": "an optional time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "flush", + "flushWait", + "count", + "batchSize", + "limit", + "ttl" + ], + "type": "object", + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + }, + "JSF_post_api_export_restrictions": { + "description": "an object containing an array of attribute names that will be included or excluded when returning result documents.
Not specifying restrict will by default return all attributes of each document.
", + "properties": { + "fields": { + "description": "Contains an array of attribute names to include or exclude. Matching of attribute names for inclusion or exclusion will be done on the top level only. Specifying names of nested attributes is not supported at the moment.

", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "has to be be set to either include or exclude depending on which you want to use
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + }, + "JSF_post_api_index_cap": { + "properties": { + "byteSize": { + "description": "The maximal size of the active document data in the collection (in bytes). If specified, the value must be at least 16384.

", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The maximal number of documents for the collection. If specified, the value must be greater than zero.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "must be equal to \"cap\".
", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_fulltext": { + "properties": { + "fields": { + "description": "an array of attribute names. Currently, the array is limited to exactly one attribute.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "minLength": { + "description": "Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "must be equal to \"fulltext\".
", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "minLength" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_geo": { + "properties": { + "fields": { + "description": "An array with one or two attribute paths.
If it is an array with one attribute path location, then a geo-spatial index on all documents is created using location as path to the coordinates. The value of the attribute must be an array with at least two double values. The array must contain the latitude (first value) and the longitude (second value). All documents, which do not have the attribute path or whose values are not suitable, are ignored.
If it is an array with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the attribute longitude must be a double. All documents, which do not have the attribute paths or whose values are not suitable, are ignored.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "geoJson": { + "description": "If a geo-spatial index on a location is constructed and geoJson is true, then the order within the array is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
", + "type": "string" + }, + "type": { + "description": "must be equal to \"geo\".
", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "geoJson" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_hash": { + "properties": { + "fields": { + "description": "an array of attribute paths.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "sparse": { + "description": "if true, then create a sparse index.
", + "format": "", + "type": "boolean" + }, + "type": { + "description": "must be equal to \"hash\".
", + "type": "string" + }, + "unique": { + "description": "if true, then create a unique index.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "type", + "fields", + "unique", + "sparse" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_skiplist": { + "properties": { + "fields": { + "description": "an array of attribute paths.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "sparse": { + "description": "if true, then create a sparse index.
", + "format": "", + "type": "boolean" + }, + "type": { + "description": "must be equal to \"skiplist\".
", + "type": "string" + }, + "unique": { + "description": "if true, then create a unique index.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "type", + "fields", + "unique", + "sparse" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_new_tasks": { + "properties": { + "command": { + "description": "The JavaScript code to be executed
", + "type": "string" + }, + "name": { + "description": "The name of the task
", + "type": "string" + }, + "offset": { + "description": "Number of seconds initial delay
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "The parameters to be passed into command
", + "type": "string" + }, + "period": { + "description": "number of seconds between the executions
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "name", + "command", + "params" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_post_api_transaction": { + "properties": { + "action": { + "description": "the actual transaction operations to be executed, in the form of stringified JavaScript code. The code will be executed on server side, with late binding. It is thus critical that the code specified in action properly sets up all the variables it needs. If the code specified in action ends with a return statement, the value returned will also be returned by the REST API in the result attribute if the transaction committed successfully.
", + "type": "string" + }, + "collections": { + "description": "contains the array of collections to be used in the transaction (mandatory). collections must be a JSON object that can have the optional sub-attributes read and write. read and write must each be either arrays of collections names or strings with a single collection name.
", + "type": "string" + }, + "lockTimeout": { + "description": "an optional numeric value that can be used to set a timeout for waiting on collection locks. If not specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "optional arguments passed to action.
", + "type": "string" + }, + "waitForSync": { + "description": "an optional boolean flag that, if set, will force the transaction to write all data to disk before returning.
", + "format": "boolean", + "type": "boolean" + } + }, + "required": [ + "collections", + "action" + ], + "type": "object", + "x-filename": "Transactions - js/actions/api-transaction.js" + }, + "JSF_post_batch_replication": { + "properties": { + "ttl": { + "description": "the time-to-live for the new batch (in seconds)
A JSON object with the batch configuration.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "ttl" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_new_tasks": { + "properties": { + "command": { + "description": "The JavaScript code to be executed
", + "type": "string" + }, + "name": { + "description": "The name of the task
", + "type": "string" + }, + "offset": { + "description": "Number of seconds initial delay
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "The parameters to be passed into command
", + "type": "string" + }, + "period": { + "description": "number of seconds between the executions
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "name", + "command", + "params" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_put_api_replication_applier_adjust": { + "properties": { + "adaptivePolling": { + "description": "if set to true, the replication applier will fall to sleep for an increasingly long period in case the logger server at the endpoint does not have any more replication events to apply. Using adaptive polling is thus useful to reduce the amount of work for both the applier and the logger server for cases when there are only infrequent changes. The downside is that when using adaptive polling, it might take longer for the replication applier to detect that there are new replication events on the logger server.
Setting adaptivePolling to false will make the replication applier contact the logger server in a constant interval, regardless of whether the logger server provides updates frequently or seldom.
", + "format": "", + "type": "boolean" + }, + "autoStart": { + "description": "whether or not to auto-start the replication applier on (next and following) server starts
", + "format": "", + "type": "boolean" + }, + "chunkSize": { + "description": "the requested maximum size for log transfer packets that is used when the endpoint is contacted.
", + "format": "int64", + "type": "integer" + }, + "connectTimeout": { + "description": "the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
", + "format": "int64", + "type": "integer" + }, + "database": { + "description": "the name of the database on the endpoint. If not specified, defaults to the current local database name.
", + "type": "string" + }, + "endpoint": { + "description": "the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "maxConnectRetries": { + "description": "the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
", + "format": "int64", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the endpoint.
", + "type": "string" + }, + "requestTimeout": { + "description": "the timeout (in seconds) for individual requests to the endpoint.
", + "format": "int64", + "type": "integer" + }, + "requireFromPresent": { + "description": "if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
", + "format": "", + "type": "boolean" + }, + "restrictCollections": { + "description": "the array of collections to include or exclude, based on the setting of restrictType
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "the configuration for restrictCollections; Has to be either include or exclude
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.
", + "type": "string" + }, + "verbose": { + "description": "if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "endpoint", + "database", + "password", + "maxConnectRetries", + "connectTimeout", + "requestTimeout", + "chunkSize", + "autoStart", + "adaptivePolling", + "includeSystem", + "requireFromPresent", + "verbose", + "restrictType" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_replication_makeSlave": { + "properties": { + "adaptivePolling": { + "description": "whether or not the replication applier will use adaptive polling.
", + "format": "", + "type": "boolean" + }, + "chunkSize": { + "description": "the requested maximum size for log transfer packets that is used when the endpoint is contacted.
", + "format": "int64", + "type": "integer" + }, + "connectTimeout": { + "description": "the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
", + "format": "int64", + "type": "integer" + }, + "database": { + "description": "the database name on the master (if not specified, defaults to the name of the local current database).
", + "type": "string" + }, + "endpoint": { + "description": "the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "maxConnectRetries": { + "description": "the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
", + "format": "int64", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the master.
", + "type": "string" + }, + "requestTimeout": { + "description": "the timeout (in seconds) for individual requests to the endpoint.
", + "format": "int64", + "type": "integer" + }, + "requireFromPresent": { + "description": "if set to true, then the replication applier will check at start of its continuous replication if the start tick from the dump phase is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
", + "format": "", + "type": "boolean" + }, + "restrictCollections": { + "description": "an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be sychronised. If restrictType is exclude, all but the specified collections will be synchronized.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "an optional string value for collection filtering. When specified, the allowed values are include or exclude.
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the master.
", + "type": "string" + }, + "verbose": { + "description": "if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "endpoint", + "database", + "password", + "includeSystem", + "maxConnectRetries", + "connectTimeout", + "requestTimeout", + "chunkSize", + "adaptivePolling", + "requireFromPresent" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_replication_synchronize": { + "properties": { + "database": { + "description": "the database name on the master (if not specified, defaults to the name of the local current database).
", + "type": "string" + }, + "endpoint": { + "description": "the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "incremental": { + "description": "if set to true, then an incremental synchronization method will be used for synchronizing data in collections. This method is useful when collections already exist locally, and only the remaining differences need to be transferred from the remote endpoint. In this case, the incremental synchronization can be faster than a full synchronization. The default value is false, meaning that the complete data from the remote collection will be transferred.
", + "format": "", + "type": "boolean" + }, + "password": { + "description": "the password to use when connecting to the endpoint.
", + "type": "string" + }, + "restrictCollections": { + "description": "an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be sychronised. If restrictType is exclude, all but the specified collections will be synchronized.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "an optional string value for collection filtering. When specified, the allowed values are include or exclude.
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.
", + "type": "string" + } + }, + "required": [ + "endpoint", + "password" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_batch_replication": { + "properties": { + "ttl": { + "description": "the time-to-live for the new batch (in seconds)
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "ttl" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "PostApiQueryProperties": { + "properties": { + "query": { + "description": "To validate a query string without executing it, the query string can be passed to the server via an HTTP POST request.
", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "PutApiQueryCacheProperties": { + "properties": { + "maxResults": { + "description": "the maximum number of query results that will be stored per database-specific cache.

", + "format": "int64", + "type": "integer" + }, + "mode": { + "description": " the mode the AQL query cache should operate in. Possible values are off, on or demand.
", + "type": "string" + } + }, + "required": [ + "mode", + "maxResults" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "PutApiQueryProperties": { + "properties": { + "enabled": { + "description": "If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
", + "format": "", + "type": "boolean" + }, + "maxQueryStringLength": { + "description": "The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
", + "format": "int64", + "type": "integer" + }, + "maxSlowQueries": { + "description": "The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
", + "format": "int64", + "type": "integer" + }, + "slowQueryThreshold": { + "description": "The threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
", + "format": "int64", + "type": "integer" + }, + "trackSlowQueries": { + "description": "If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "enabled", + "trackSlowQueries", + "maxSlowQueries", + "slowQueryThreshold", + "maxQueryStringLength" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "RestLookupByKeys": { + "properties": { + "collection": { + "description": "The name of the collection to look in for the documents
", + "type": "string" + }, + "keys": { + "description": "array with the _keys of documents to remove.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "keys" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "RestRemoveByKeys": { + "properties": { + "collection": { + "description": "The name of the collection to look in for the documents to remove
", + "type": "string" + }, + "keys": { + "description": "array with the _keys of documents to remove.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "$ref": "#/definitions/put_api_simple_remove_by_keys_opts" + } + }, + "required": [ + "collection", + "keys" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "collection_figures": { + "description": "metrics of the collection
", + "properties": { + "alive": { + "$ref": "#/definitions/collection_figures_alive" + }, + "attributes": { + "$ref": "#/definitions/collection_figures_attributes" + }, + "compactors": { + "$ref": "#/definitions/collection_figures_compactors" + }, + "datafiles": { + "$ref": "#/definitions/collection_figures_datafiles" + }, + "dead": { + "$ref": "#/definitions/collection_figures_dead" + }, + "indexes": { + "$ref": "#/definitions/collection_figures_indexes" + }, + "journals": { + "$ref": "#/definitions/collection_figures_journals" + }, + "maxTick": { + "description": "The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.
", + "format": "int64", + "type": "integer" + }, + "shapefiles": { + "$ref": "#/definitions/collection_figures_shapefiles" + }, + "shapes": { + "$ref": "#/definitions/collection_figures_shapes" + }, + "uncollectedLogfileEntries": { + "description": "The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "figures", + "alive", + "dead", + "datafiles", + "journals", + "compactors", + "shapefiles", + "shapes", + "attributes", + "indexes" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_alive": { + "description": "the currently active figures
", + "properties": { + "count": { + "description": "The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_attributes": { + "description": "", + "properties": { + "count": { + "description": "The total number of attributes used in the collection. Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size of the attribute data (in bytes). Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_compactors": { + "description": "
", + "properties": { + "count": { + "description": "The number of compactor files.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of all compactor files (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_datafiles": { + "description": "Metrics regarding the datafiles
", + "properties": { + "count": { + "description": "The number of datafiles.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of datafiles (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_dead": { + "description": "the items waiting to be swept away by the cleaner
", + "properties": { + "count": { + "description": "The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "deletion": { + "description": "The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reporting in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size in bytes used by all dead documents.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_indexes": { + "description": "", + "properties": { + "count": { + "description": "The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total memory allocated for indexes in bytes.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_journals": { + "description": "Metrics regarding the journal files
", + "properties": { + "count": { + "description": "The number of journal files.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of all journal files (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_shapefiles": { + "description": "deprecated
", + "properties": { + "count": { + "description": "The number of shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 since ArangoDB 2.0 and higher.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of the shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 in ArangoDB 2.0 and higher.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_shapes": { + "description": "", + "properties": { + "count": { + "description": "The total number of shapes used in the collection. This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size of all shapes (in bytes). This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "explain_options": { + "description": "Options for the query
", + "properties": { + "allPlans": { + "description": "if set to true, all possible execution plans will be returned. The default is false, meaning only the optimal plan will be returned.
", + "type": "boolean" + }, + "maxNumberOfPlans": { + "description": "an optional maximum number of plans that the optimizer is allowed to generate. Setting this attribute to a low value allows to put a cap on the amount of work the optimizer does.
", + "format": "int64", + "type": "integer" + }, + "optimizer.rules": { + "description": "an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules.
", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put_api_simple_remove_by_example_opts": { + "description": "a json object which can contains following attributes:
", + "properties": { + "limit": { + "description": "an optional value that determines how many documents to delete at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be deleted.
", + "type": "string" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_remove_by_keys_opts": { + "description": "a json object which can contains following attributes:
", + "properties": { + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_replace_by_example_options": { + "description": "a json object which can contain following attributes
", + "properties": { + "limit": { + "description": "an optional value that determines how many documents to replace at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be replaced.

", + "type": "string" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_update_by_example_options": { + "description": "a json object which can contains following attributes:
", + "properties": { + "keepNull": { + "description": "This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document.
", + "type": "string" + }, + "limit": { + "description": "an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated.
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "info": { + "description": "ArangoDB REST API Interface", + "license": { + "name": "Apache License, Version 2.0" + }, + "title": "ArangoDB", + "version": "1.0" + }, + "paths": { + "/_admin/cluster-test": { + "delete": { + "description": "\n\nSee GET method.
", + "parameters": [], + "summary": " Delete cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "get": { + "description": "\n\n
Executes a cluster roundtrip from a coordinator to a DB server and back. This call only works in a coordinator node in a cluster. One can and should append an arbitrary path to the URL and the part after /_admin/cluster-test is used as the path of the HTTP request which is sent from the coordinator to a DB node. Likewise, any form data appended to the URL is forwarded in the request to the DB node. This handler takes care of all request types (see below) and uses the same request type in its request to the DB node.
The following HTTP headers are interpreted in a special way:
- X-Shard-ID: This specifies the ID of the shard to which the cluster request is sent and thus tells the system to which DB server to send the cluster request. Note that the mapping from the shard ID to the responsible server has to be defined in the agency under Current/ShardLocation/. One has to give this header, otherwise the system does not know where to send the request. - X-Client-Transaction-ID: the value of this header is taken as the client transaction ID for the request - X-Timeout: specifies a timeout in seconds for the cluster operation. If the answer does not arrive within the specified timeout, an corresponding error is returned and any subsequent real answer is ignored. The default if not given is 24 hours. - X-Synchronous-Mode: If set to true the test function uses synchronous mode, otherwise the default asynchronous operation mode is used. This is mainly for debugging purposes. - Host: This header is ignored and not forwarded to the DB server. - User-Agent: This header is ignored and not forwarded to the DB server.
All other HTTP headers and the body of the request (if present, see other HTTP methods below) are forwarded as given in the original request.
In asynchronous mode the DB server answers with an HTTP request of its own, in synchronous mode it sends a HTTP response. In both cases the headers and the body are used to produce the HTTP response of this API call.
", + "parameters": [], + "responses": { + "200": { + "description": "is returned when everything went well, or if a timeout occurred. In the latter case a body of type application/json indicating the timeout is returned.
" + }, + "403": { + "description": "is returned if ArangoDB is not running in cluster mode.
" + }, + "404": { + "description": "is returned if ArangoDB was not compiled for cluster operation.
" + } + }, + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "head": { + "description": "\n\nSee GET method.
", + "parameters": [], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "patch": { + "description": "free style json body\n\nSee GET method. The body can be any type and is simply forwarded.
", + "parameters": [ + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Update cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "post": { + "description": "free style json body\n\nSee GET method.
", + "parameters": [ + { + "description": "The body can be any type and is simply forwarded.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "put": { + "description": "free style json body\n\nSee GET method. The body can be any type and is simply forwarded.
", + "parameters": [ + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterCheckPort": { + "get": { + "description": "\n\n
", + "parameters": [ + { + "description": "
", + "in": "query", + "name": "port", + "required": true, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "
" + } + }, + "summary": " Check port", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterDispatch": { + "post": { + "description": "**A json post document with these Properties is required:**
  • clusterPlan: is a cluster plan (see JSF_cluster_planner_POST),
  • action: can be one of the following: - \"launch\": the cluster is launched for the first time, all data directories and log files are cleaned and created - \"shutdown\": the cluster is shut down, the additional property runInfo (see below) must be bound as well - \"relaunch\": the cluster is launched again, all data directories and log files are untouched and need to be there already - \"cleanup\": use this after a shutdown to remove all data in the data directories and all log files, use with caution - \"isHealthy\": checks whether or not the processes involved in the cluster are running or not. The additional property runInfo (see above) must be bound as well - \"upgrade\": performs an upgrade of a cluster, to this end, the agency is started, and then every server is once started with the \"--upgrade\" option, and then normally. Finally, the script \"verion-check.js\" is run on one of the coordinators for the cluster.
  • runInfo: this is needed for the \"shutdown\" and \"isHealthy\" actions only and should be the structure that \"launch\", \"relaunch\" or \"upgrade\" returned. It contains runtime information like process IDs.
  • myname: is the ID of this dispatcher, this is used to decide which commands are executed locally and which are forwarded to other dispatchers
\n\nThe body must be an object with the following properties:
This call executes the plan by either doing the work personally or by delegating to other dispatchers.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_cluster_dispatcher_POST" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "went wrong with the startup.
" + } + }, + "summary": "execute startup commands", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterPlanner": { + "post": { + "description": "free style json body\n\nof a cluster and returns a JSON description of a plan to start up this cluster.
", + "parameters": [ + { + "description": "A cluster plan object
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "
" + } + }, + "summary": " Produce cluster startup plan", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterStatistics": { + "get": { + "description": "\n\n
", + "parameters": [ + { + "description": "
", + "in": "query", + "name": "DBserver", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "ID of a DBserver
" + }, + "403": { + "description": "
" + } + }, + "summary": " Queries statistics of DBserver", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/database/target-version": { + "get": { + "description": "\n\n
Returns the database-version that this server requires. The version is returned in the version attribute of the result.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned in all cases.
" + } + }, + "summary": " Return the required version of the database", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/echo": { + "get": { + "description": "\n\n
The call returns an object with the following attributes:
  • headers: object with HTTP headers received
  • requestType: the HTTP request method (e.g. GET)
  • parameters: object with URL parameters received
", + "parameters": [], + "responses": { + "200": { + "description": "Echo was returned successfully.
" + } + }, + "summary": " Return current request", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/execute": { + "post": { + "description": "free style json body\n\n
Executes the javascript code in the body on the server as the body of a function with no arguments. If you have a return statement then the return value you produce will be returned as content type application/json. If the parameter returnAsJSON is set to true, the result will be a JSON object describing the return value directly, otherwise a string produced by JSON.stringify will be returned.
", + "parameters": [ + { + "description": "The body to be executed.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute program", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/log": { + "get": { + "description": "\n\nReturns fatal, error, warning or info log messages from the server's global log. The result is a JSON object with the following attributes:
  • lid: a list of log entry identifiers. Each log message is uniquely identified by its lid and the identifiers are in ascending order.
  • level: a list of the log-levels for all log entries.
  • timestamp: a list of the timestamps as seconds since 1970-01-01 for all log entries.
  • text a list of the texts of all log entries
  • totalAmount: the total amount of log entries before pagination.
", + "parameters": [ + { + "description": "Returns all log entries up to log level upto. Note that upto must be:
  • fatal or 0
  • error or 1
  • warning or 2
  • info or 3
  • debug or 4 The default value is info.
", + "in": "query", + "name": "upto", + "required": false, + "type": "string" + }, + { + "description": "Returns all log entries of log level level. Note that the URL parameters upto and level are mutually exclusive.
", + "in": "query", + "name": "level", + "required": false, + "type": "string" + }, + { + "description": "Returns all log entries such that their log entry identifier (lid value) is greater or equal to start.
", + "in": "query", + "name": "start", + "required": false, + "type": "number" + }, + { + "description": "Restricts the result to at most size log entries.
", + "in": "query", + "name": "size", + "required": false, + "type": "number" + }, + { + "description": "Starts to return log entries skipping the first offset log entries. offset and size can be used for pagination.
", + "in": "query", + "name": "offset", + "required": false, + "type": "number" + }, + { + "description": "Only return the log entries containing the text specified in search.
", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "Sort the log entries either ascending (if sort is asc) or descending (if sort is desc) according to their lid values. Note that the lid imposes a chronological order. The default value is asc.
", + "in": "query", + "name": "sort", + "required": false, + "type": "string" + } + ], + "responses": { + "400": { + "description": "is returned if invalid values are specified for upto or level.
" + }, + "403": { + "description": "is returned if the log is requested for any database other than _system.
" + }, + "500": { + "description": "is returned if the server cannot generate the result due to an out-of-memory error.
" + } + }, + "summary": " Read global log from the server", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/long_echo": { + "get": { + "description": "\n\n
The call returns an object with the following attributes:
  • headers: object with HTTP headers received
  • requestType: the HTTP request method (e.g. GET)
  • parameters: object with URL parameters received
", + "parameters": [], + "responses": { + "200": { + "description": "Echo was returned successfully.
" + } + }, + "summary": " Return current request and continues", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/routing/reload": { + "post": { + "description": "\n\n
Reloads the routing information from the collection routing.
", + "parameters": [], + "responses": { + "200": { + "description": "Routing information was reloaded successfully.
" + } + }, + "summary": " Reloads the routing information", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/server/role": { + "get": { + "description": "\n\n
Returns the role of a server in a cluster. The role is returned in the role attribute of the result. Possible return values for role are:
  • COORDINATOR: the server is a coordinator in a cluster
  • PRIMARY: the server is a primary database server in a cluster
  • SECONDARY: the server is a secondary database server in a cluster
  • UNDEFINED: in a cluster, UNDEFINED is returned if the server role cannot be determined. On a single server, UNDEFINED is the only possible return value.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned in all cases.
" + } + }, + "summary": " Return role of a server in a cluster", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/shutdown": { + "get": { + "description": "\n\nThis call initiates a clean shutdown sequence.
", + "parameters": [], + "responses": { + "200": { + "description": "is returned in all cases.
" + } + }, + "summary": " Initiate shutdown sequence", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/sleep": { + "get": { + "description": "\n\n
The call returns an object with the attribute duration. This takes as many seconds as the duration argument says.
", + "parameters": [ + { + "description": "wait `duration` seconds until the reply is sent.
", + "format": "integer", + "in": "path", + "name": "duration", + "required": true, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "Sleep was conducted successfully.
" + } + }, + "summary": " Sleep for a specified amount of seconds", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/statistics": { + "get": { + "description": "\n\n
Returns the statistics information. The returned object contains the statistics figures grouped together according to the description returned by _admin/statistics-description. For instance, to access a figure userTime from the group system, you first select the sub-object describing the group stored in system and in that sub-object the value for userTime is stored in the attribute of the same name.
In case of a distribution, the returned object contains the total count in count and the distribution list in counts. The sum (or total) of the individual values is returned in sum.

Example:

shell> curl --dump - http://localhost:8529/_admin/statistics\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"time\" : 1443627584.140516, \n  \"system\" : { \n    \"minorPageFaults\" : 137584, \n    \"majorPageFaults\" : 5, \n    \"userTime\" : 36.03, \n    \"systemTime\" : 1.34, \n    \"numberOfThreads\" : 23, \n    \"residentSize\" : 192217088, \n    \"residentSizePercent\" : 0.022905696552235805, \n    \"virtualSize\" : 3688673280 \n  }, \n  \"client\" : { \n    \"httpConnections\" : 1, \n    \"connectionTime\" : { \n      \"sum\" : 0.00033211708068847656, \n      \"count\" : 1, \n      \"counts\" : [ \n        1, \n        0, \n        0, \n        0 \n      ] \n    }, \n    \"totalTime\" : { \n      \"sum\" : 26.437366724014282, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3204, \n        267, \n        60, \n        16, \n        3, \n        1, \n        4 \n      ] \n    }, \n    \"requestTime\" : { \n      \"sum\" : 14.136068344116211, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3297, \n        219, \n        26, \n        8, \n        3, \n        2, \n        0 \n      ] \n    }, \n    \"queueTime\" : { \n      \"sum\" : 0.09597921371459961, \n      \"count\" : 3526, \n      \"counts\" : [ \n        3526, \n        0, \n        0, \n        0, \n        0, \n        0, \n        0 \n      ] \n    }, \n    \"ioTime\" : { \n      \"sum\" : 12.205319166183472, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3438, \n        98, \n        12, \n        4, \n        0, \n        0, \n        3 \n      ] \n    }, \n    \"bytesSent\" : { \n      \"sum\" : 1578763, \n      \"count\" : 3555, \n      \"counts\" : [ \n        389, \n        2939, \n        15, \n        212, \n        0, \n        0 \n      ] \n    }, \n    \"bytesReceived\" : { \n      \"sum\" : 796270, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3344, \n        211, \n        0, \n        0, \n        0, \n        0 
\n      ] \n    } \n  }, \n  \"http\" : { \n    \"requestsTotal\" : 3567, \n    \"requestsAsync\" : 0, \n    \"requestsGet\" : 597, \n    \"requestsHead\" : 65, \n    \"requestsPost\" : 2652, \n    \"requestsPut\" : 110, \n    \"requestsPatch\" : 3, \n    \"requestsDelete\" : 139, \n    \"requestsOptions\" : 0, \n    \"requestsOther\" : 1 \n  }, \n  \"server\" : { \n    \"uptime\" : 47.32217192649841, \n    \"physicalMemory\" : 8391671808 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Statistics were returned successfully.
" + } + }, + "summary": " Read the statistics", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/statistics-description": { + "get": { + "description": "\n\n
Returns a description of the statistics returned by /_admin/statistics. The returned objects contains an array of statistics groups in the attribute groups and an array of statistics figures in the attribute figures.
A statistics group is described by
  • group: The identifier of the group.
  • name: The name of the group.
  • description: A description of the group.
A statistics figure is described by
  • group: The identifier of the group to which this figure belongs.
  • identifier: The identifier of the figure. It is unique within the group.
  • name: The name of the figure.
  • description: A description of the figure.
  • type: Either current, accumulated, or distribution.
  • cuts: The distribution vector.
  • units: Units in which the figure is measured.

Example:

shell> curl --dump - http://localhost:8529/_admin/statistics-description\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"groups\" : [ \n    { \n      \"group\" : \"system\", \n      \"name\" : \"Process Statistics\", \n      \"description\" : \"Statistics about the ArangoDB process\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"name\" : \"Client Connection Statistics\", \n      \"description\" : \"Statistics about the connections.\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"name\" : \"HTTP Request Statistics\", \n      \"description\" : \"Statistics about the HTTP requests.\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"name\" : \"Server Statistics\", \n      \"description\" : \"Statistics about the ArangoDB server\" \n    } \n  ], \n  \"figures\" : [ \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"userTime\", \n      \"name\" : \"User Time\", \n      \"description\" : \"Amount of time that this process has been scheduled in user mode, measured in seconds.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"systemTime\", \n      \"name\" : \"System Time\", \n      \"description\" : \"Amount of time that this process has been scheduled in kernel mode, measured in seconds.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"numberOfThreads\", \n      \"name\" : \"Number of Threads\", \n      \"description\" : \"Number of threads in the arangod process.\", \n      \"type\" : \"current\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"residentSize\", \n      \"name\" : \"Resident Set Size\", \n      \"description\" : \"The total size of the number of pages the process has in real memory. 
This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out. The resident set size is reported in bytes.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"residentSizePercent\", \n      \"name\" : \"Resident Set Size\", \n      \"description\" : \"The percentage of physical memory used by the process as resident set size.\", \n      \"type\" : \"current\", \n      \"units\" : \"percent\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"virtualSize\", \n      \"name\" : \"Virtual Memory Size\", \n      \"description\" : \"On Windows, this figure contains the total amount of memory that the memory manager has committed for the arangod process. On other systems, this figure contains The size of the virtual memory the process is using.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"minorPageFaults\", \n      \"name\" : \"Minor Page Faults\", \n      \"description\" : \"The number of minor faults the process has made which have not required loading a memory page from disk. This figure is not reported on Windows.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"majorPageFaults\", \n      \"name\" : \"Major Page Faults\", \n      \"description\" : \"On Windows, this figure contains the total number of page faults. 
On other system, this figure contains the number of major faults the process has made which have required loading a memory page from disk.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"httpConnections\", \n      \"name\" : \"Client Connections\", \n      \"description\" : \"The number of connections that are currently open.\", \n      \"type\" : \"current\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"totalTime\", \n      \"name\" : \"Total Time\", \n      \"description\" : \"Total time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"requestTime\", \n      \"name\" : \"Request Time\", \n      \"description\" : \"Request time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"queueTime\", \n      \"name\" : \"Queue Time\", \n      \"description\" : \"Queue time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"bytesSent\", \n      \"name\" : \"Bytes Sent\", \n      \"description\" : \"Bytes sents for a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        250, \n        1000, \n        2000, \n        5000, \n        10000 \n      ], \n      \"units\" : 
\"bytes\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"bytesReceived\", \n      \"name\" : \"Bytes Received\", \n      \"description\" : \"Bytes receiveds for a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        250, \n        1000, \n        2000, \n        5000, \n        10000 \n      ], \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"connectionTime\", \n      \"name\" : \"Connection Time\", \n      \"description\" : \"Total connection time of a client.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.1, \n        1, \n        60 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsTotal\", \n      \"name\" : \"Total requests\", \n      \"description\" : \"Total number of HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsAsync\", \n      \"name\" : \"Async requests\", \n      \"description\" : \"Number of asynchronously executed HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsGet\", \n      \"name\" : \"HTTP GET requests\", \n      \"description\" : \"Number of HTTP GET requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsHead\", \n      \"name\" : \"HTTP HEAD requests\", \n      \"description\" : \"Number of HTTP HEAD requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPost\", \n      \"name\" : \"HTTP POST requests\", \n      \"description\" : \"Number of HTTP POST requests.\", \n      \"type\" : 
\"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPut\", \n      \"name\" : \"HTTP PUT requests\", \n      \"description\" : \"Number of HTTP PUT requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPatch\", \n      \"name\" : \"HTTP PATCH requests\", \n      \"description\" : \"Number of HTTP PATCH requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsDelete\", \n      \"name\" : \"HTTP DELETE requests\", \n      \"description\" : \"Number of HTTP DELETE requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsOptions\", \n      \"name\" : \"HTTP OPTIONS requests\", \n      \"description\" : \"Number of HTTP OPTIONS requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsOther\", \n      \"name\" : \"other HTTP requests\", \n      \"description\" : \"Number of other HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"identifier\" : \"uptime\", \n      \"name\" : \"Server Uptime\", \n      \"description\" : \"Number of seconds elapsed since server start.\", \n      \"type\" : \"current\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"identifier\" : \"physicalMemory\", \n      \"name\" : \"Physical Memory\", \n      \"description\" : \"Physical memory in bytes.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Description was returned successfully.
" + } + }, + "summary": " Statistics description", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/test": { + "post": { + "description": "free style json body\n\n
Executes the specified tests on the server and returns an object with the test results. The object has an attribute \"error\" which states whether any error occurred. The object also has an attribute \"passed\" which indicates which tests passed and which did not.
", + "parameters": [ + { + "description": "A JSON object containing an attribute tests which lists the files containing the test suites.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Runs tests on server", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/time": { + "get": { + "description": "\n\n
The call returns an object with the attribute time. This contains the current system time as a Unix timestamp with microsecond precision.
", + "parameters": [], + "responses": { + "200": { + "description": "Time was returned successfully.
" + } + }, + "summary": " Return system time", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/wal/flush": { + "put": { + "description": "\n\n
Flushes the write-ahead log. By flushing the currently active write-ahead logfile, the data in it can be transferred to collection journals and datafiles. This is useful to ensure that all data for a collection is present in the collection journals and datafiles, for example, when dumping the data of a collection.
", + "parameters": [ + { + "description": "Whether or not the operation should block until the not-yet synchronized data in the write-ahead log was synchronized to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not the operation should block until the data in the flushed log has been collected by the write-ahead log garbage collector. Note that setting this option to true might block for a long time if there are long-running transactions and the write-ahead log garbage collector cannot finish garbage collection.
", + "in": "query", + "name": "waitForCollector", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Flushes the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_admin/wal/properties": { + "get": { + "description": "\n\n
Retrieves the configuration of the write-ahead log. The result is a JSON object with the following attributes:
  • allowOversizeEntries: whether or not operations that are bigger than a single logfile can be executed and stored
  • logfileSize: the size of each write-ahead logfile
  • historicLogfiles: the maximum number of historic logfiles to keep
  • reserveLogfiles: the maximum number of reserve logfiles that ArangoDB allocates in the background
  • syncInterval: the interval for automatic synchronization of not-yet synchronized write-ahead log data (in milliseconds)
  • throttleWait: the maximum wait time that operations will wait before they get aborted in case of write-throttling (in milliseconds)
  • throttleWhenPending: the number of unprocessed garbage-collection operations that, when reached, will activate write-throttling. A value of 0 means that write-throttling will not be triggered.

Example:

shell> curl --dump - http://localhost:8529/_admin/wal/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"allowOversizeEntries\" : true, \n  \"logfileSize\" : 33554432, \n  \"historicLogfiles\" : 10, \n  \"reserveLogfiles\" : 1, \n  \"syncInterval\" : 100, \n  \"throttleWait\" : 15000, \n  \"throttleWhenPending\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Retrieves the configuration of the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + }, + "put": { + "description": "\n\n
Configures the behavior of the write-ahead log. The body of the request must be a JSON object with the following attributes:
  • allowOversizeEntries: whether or not operations that are bigger than a single logfile can be executed and stored
  • logfileSize: the size of each write-ahead logfile
  • historicLogfiles: the maximum number of historic logfiles to keep
  • reserveLogfiles: the maximum number of reserve logfiles that ArangoDB allocates in the background
  • throttleWait: the maximum wait time that operations will wait before they get aborted in case of write-throttling (in milliseconds)
  • throttleWhenPending: the number of unprocessed garbage-collection operations that, when reached, will activate write-throttling. A value of 0 means that write-throttling will not be triggered.
Specifying any of the above attributes is optional. Not specified attributes will be ignored and the configuration for them will not be modified.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_admin/wal/properties <<EOF\n{ \n  \"logfileSize\" : 33554432, \n  \"allowOversizeEntries\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"allowOversizeEntries\" : true, \n  \"logfileSize\" : 33554432, \n  \"historicLogfiles\" : 10, \n  \"reserveLogfiles\" : 1, \n  \"syncInterval\" : 100, \n  \"throttleWait\" : 15000, \n  \"throttleWhenPending\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Configures the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_admin/wal/transactions": { + "get": { + "description": "\n\n
Returns information about the currently running transactions. The result is a JSON object with the following attributes:
  • runningTransactions: number of currently running transactions
  • minLastCollected: minimum id of the last collected logfile (at the start of each running transaction). This is null if no transaction is running.
  • minLastSealed: minimum id of the last sealed logfile (at the start of each running transaction). This is null if no transaction is running.

Example:

shell> curl --dump - http://localhost:8529/_admin/wal/transactions\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"runningTransactions\" : 0, \n  \"minLastCollected\" : null, \n  \"minLastSealed\" : null, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Returns information about the currently running transactions", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_api/aqlfunction": { + "get": { + "description": "\n\nReturns all registered AQL user functions.
The call will return a JSON array with all user functions found. Each user function will at least have the following attributes:
  • name: The fully qualified name of the user function
  • code: A string representation of the function body

Example:

shell> curl --dump - http://localhost:8529/_api/aqlfunction\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"name\" : \"myfunctions::temperature::celsiustofahrenheit\", \n    \"code\" : \"function (celsius) { return celsius * 1.8 + 32; }\" \n  } \n]\n

\n
", + "parameters": [ + { + "description": "Returns all registered AQL user functions from namespace namespace.
", + "in": "query", + "name": "namespace", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "if success HTTP 200 is returned.
" + } + }, + "summary": " Return registered AQL user functions", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • isDeterministic: an optional boolean value to indicate that the function results are fully deterministic (function return value solely depends on the input value and return value is the same for repeated calls with same input). The isDeterministic attribute is currently not used but may be used later for optimisations.
  • code: a string representation of the function body.
  • name: the fully qualified name of the user functions.
\n\n
In case of success, the returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/aqlfunction <<EOF\n{ \n  \"name\" : \"myfunctions::temperature::celsiustofahrenheit\", \n  \"code\" : \"function (celsius) { return celsius * 1.8 + 32; }\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_aqlfunction" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the function already existed and was replaced by the call, the server will respond with HTTP 200.
" + }, + "201": { + "description": "If the function can be registered by the server, the server will respond with HTTP 201.
" + }, + "400": { + "description": "If the JSON representation is malformed or mandatory data is missing from the request, the server will respond with HTTP 400.
" + } + }, + "summary": " Create AQL user function", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/aqlfunction/{name}": { + "delete": { + "description": "\n\n
Removes an existing AQL user function, identified by name.
In case of success, the returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example: deletes a function:

shell> curl -X DELETE --dump - http://localhost:8529/_api/aqlfunction/square::x::y\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: function not found:

shell> curl -X DELETE --dump - http://localhost:8529/_api/aqlfunction/myfunction::x::y\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1582, \n  \"errorMessage\" : \"user function '%s()' not found\" \n}\n

\n
", + "parameters": [ + { + "description": "the name of the AQL user function.
", + "format": "string", + "in": "path", + "name": "name", + "required": true, + "type": "string" + }, + { + "description": "If set to true, then the function name provided in name is treated as a namespace prefix, and all functions in the specified namespace will be deleted. If set to false, the function name provided in name must be fully qualified, including any namespaces.
", + "in": "query", + "name": "group", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the function can be removed by the server, the server will respond with HTTP 200.
" + }, + "400": { + "description": "If the user function name is malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "If the specified user user function does not exist, the server will respond with HTTP 404.
" + } + }, + "summary": " Remove existing AQL user function", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/batch": { + "post": { + "description": "free style json body\n\nExecutes a batch request. A batch request can contain any number of other requests that can be sent to ArangoDB in isolation. The benefit of using batch requests is that batching requests requires less client/server roundtrips than when sending isolated requests.
All parts of a batch request are executed serially on the server. The server will return the results of all parts in a single response when all parts are finished.
Technically, a batch request is a multipart HTTP request, with content-type `multipart/form-data`. A batch request consists of an envelope and the individual batch part actions. Batch part actions are \"regular\" HTTP requests, including full header and an optional body. Multiple batch parts are separated by a boundary identifier. The boundary identifier is declared in the batch envelope. The MIME content-type for each individual batch part must be `application/x-arango-batchpart`.
Please note that when constructing the individual batch parts, you must use CRLF (`\\r\\n`) as the line terminator as in regular HTTP messages.
The response sent by the server will be an `HTTP 200` response, with an optional error summary header `x-arango-errors`. This header contains the number of batch part operations that failed with an HTTP error code of at least 400. This header is only present in the response if the number of errors is greater than zero.
The response sent by the server is a multipart response, too. It contains the individual HTTP responses for all batch parts, including the full HTTP result header (with status code and other potential headers) and an optional result body. The individual batch parts in the result are separated using the same boundary value as specified in the request.
The order of batch parts in the response will be the same as in the original client request. Clients can additionally use the `Content-Id` MIME header in a batch part to define an individual id for each batch part. The server will return this id in the batch part responses, too.

Example: Sending a batch request with five batch parts:
  • GET /_api/version
  • DELETE /_api/collection/products
  • POST /_api/collection/products
  • GET /_api/collection/products/figures
  • DELETE /_api/collection/products
The boundary (`SomeBoundaryValue`) is passed to the server in the `Content-Type` HTTP header. Please note that the reply is not displayed with complete accuracy.


shell> curl -X POST --header 'Content-Type: multipart/form-data; boundary=SomeBoundaryValue' --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: myId1\r\n\r\nGET /_api/version HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: myId2\r\n\r\nDELETE /_api/collection/products HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: someId\r\n\r\nPOST /_api/collection/products HTTP/1.1\r\n\r\n{ \"name\": \"products\" }\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: nextId\r\n\r\nGET /_api/collection/products/figures HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: otherId\r\n\r\nDELETE /_api/collection/products HTTP/1.1\r\n--SomeBoundaryValue--\r\n\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: multipart/form-data; boundary=SomeBoundaryValue\nx-arango-errors: 1\n\n\"--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: myId1\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 43\\r\\n\\r\\n{\\\"server\\\":\\\"arango\\\",\\\"version\\\":\\\"2.7.0-devel\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: myId2\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 88\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'products'\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: someId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nLocation: /_db/_system/_api/collection/products\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 
137\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"name\\\":\\\"products\\\",\\\"waitForSync\\\":false,\\\"isVolatile\\\":false,\\\"isSystem\\\":false,\\\"status\\\":3,\\\"type\\\":2,\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: nextId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nLocation: /_db/_system/_api/collection/products/figures\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 635\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"name\\\":\\\"products\\\",\\\"isSystem\\\":false,\\\"doCompact\\\":true,\\\"isVolatile\\\":false,\\\"journalSize\\\":1048576,\\\"keyOptions\\\":{\\\"type\\\":\\\"traditional\\\",\\\"allowUserKeys\\\":true},\\\"waitForSync\\\":false,\\\"indexBuckets\\\":8,\\\"count\\\":0,\\\"figures\\\":{\\\"alive\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"dead\\\":{\\\"count\\\":0,\\\"size\\\":0,\\\"deletion\\\":0},\\\"datafiles\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"journals\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"compactors\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"shapefiles\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"shapes\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"attributes\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"indexes\\\":{\\\"count\\\":1,\\\"size\\\":16064},\\\"lastTick\\\":\\\"0\\\",\\\"uncollectedLogfileEntries\\\":0},\\\"status\\\":3,\\\"type\\\":2,\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: otherId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 43\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue--\"\n

\n
Example: Sending a batch request, setting the boundary implicitly (the server will in this case try to find the boundary at the beginning of the request body).

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\n\r\nDELETE /_api/collection/notexisting1 HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\n\r\nDELETE /_api/collection/notexisting2 HTTP/1.1\r\n--SomeBoundaryValue--\r\n\nEOF\n\nHTTP/1.1 200 OK\nx-arango-errors: 2\n\n\"--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 92\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'notexisting1'\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 92\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'notexisting2'\\\"}\\r\\n--SomeBoundaryValue--\"\n

\n
", + "parameters": [ + { + "description": "The multipart batch request, consisting of the envelope and the individual batch parts.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "is returned if the batch was received successfully. HTTP 200 is returned even if one or multiple batch part actions failed.
" + }, + "400": { + "description": "is returned if the batch envelope is malformed or incorrectly formatted. This code will also be returned if the content-type of the overall batch request or the individual MIME parts is not as expected.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": "executes a batch request", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/collection": { + "get": { + "description": "\n\nReturns an object with an attribute collections containing an array of all collection descriptions. The same information is also available in the names as an object with the collection names as keys.
By providing the optional URL parameter excludeSystem with a value of true, all system collections will be excluded from the response.

Example: Return information about all collections:

shell> curl --dump - http://localhost:8529/_api/collection\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"id\" : \"6216135\", \n      \"name\" : \"_queues\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"5757383\", \n      \"name\" : \"_configuration\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"22206919\", \n      \"name\" : \"animals\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"14145991\", \n      \"name\" : \"_sessions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2087367\", \n      \"name\" : \"_graphs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2480583\", \n      \"name\" : \"_cluster_kickstarter_plans\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"252359\", \n      \"name\" : \"_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"14866887\", \n      \"name\" : \"_system_users_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4577735\", \n      \"name\" : \"_statisticsRaw\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2349511\", \n      \"name\" : \"_routing\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"6347207\", \n      \"name\" : \"_jobs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"7199175\", \n      \"name\" : \"_apps\", \n      \"isSystem\" : true, \n      
\"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4970951\", \n      \"name\" : \"_statistics\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"5364167\", \n      \"name\" : \"_statistics15\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"21354951\", \n      \"name\" : \"demo\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4446663\", \n      \"name\" : \"_aqlfunctions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2218439\", \n      \"name\" : \"_modules\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    } \n  ], \n  \"names\" : { \n    \"_queues\" : { \n      \"id\" : \"6216135\", \n      \"name\" : \"_queues\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_configuration\" : { \n      \"id\" : \"5757383\", \n      \"name\" : \"_configuration\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"animals\" : { \n      \"id\" : \"22206919\", \n      \"name\" : \"animals\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_sessions\" : { \n      \"id\" : \"14145991\", \n      \"name\" : \"_sessions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_graphs\" : { \n      \"id\" : \"2087367\", \n      \"name\" : \"_graphs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_cluster_kickstarter_plans\" : { \n      \"id\" : \"2480583\", \n      \"name\" : \"_cluster_kickstarter_plans\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_users\" : { \n      \"id\" : \"252359\", \n      \"name\" : \"_users\", \n     
 \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_system_users_users\" : { \n      \"id\" : \"14866887\", \n      \"name\" : \"_system_users_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statisticsRaw\" : { \n      \"id\" : \"4577735\", \n      \"name\" : \"_statisticsRaw\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_routing\" : { \n      \"id\" : \"2349511\", \n      \"name\" : \"_routing\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_jobs\" : { \n      \"id\" : \"6347207\", \n      \"name\" : \"_jobs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_apps\" : { \n      \"id\" : \"7199175\", \n      \"name\" : \"_apps\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statistics\" : { \n      \"id\" : \"4970951\", \n      \"name\" : \"_statistics\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statistics15\" : { \n      \"id\" : \"5364167\", \n      \"name\" : \"_statistics15\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"demo\" : { \n      \"id\" : \"21354951\", \n      \"name\" : \"demo\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_aqlfunctions\" : { \n      \"id\" : \"4446663\", \n      \"name\" : \"_aqlfunctions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_modules\" : { \n      \"id\" : \"2218439\", \n      \"name\" : \"_modules\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "Whether or not system collections should be excluded from the result.
", + "in": "query", + "name": "excludeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "The list of collections
" + } + }, + "summary": "reads all collections", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • journalSize: The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
  • keyOptions: additional options for key generation. If specified, then keyOptions should be a JSON object containing the following attributes:
    • allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator will solely be responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
    • type: specifies the type of the key generator. The currently available generators are traditional and autoincrement.
    • increment: increment value for autoincrement key generator. Not used for other key generator types.
    • offset: Initial offset value for autoincrement key generator. Not used for other key generator types.
  • name: The name of the collection.
  • waitForSync: If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
  • doCompact: whether or not the collection will be compacted (default is true)
  • isVolatile: If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
  • shardKeys: (The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
  • numberOfShards: (The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
  • isSystem: If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
  • type: (The default is 2): the type of the collection to create. The following values for type are valid:
    • 2: document collection
    • 3: edges collection
  • indexBuckets: The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
    For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
\n\nCreates a new collection with a given name. The request must contain an object with the following attributes.


Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionBasics\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionBasics\n\n{ \n  \"id\" : \"619895239\", \n  \"name\" : \"testCollectionBasics\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionEdges\", \n  \"type\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionEdges\n\n{ \n  \"id\" : \"620026311\", \n  \"name\" : \"testCollectionEdges\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 3, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionUsers\", \n  \"keyOptions\" : { \n    \"type\" : \"autoincrement\", \n    \"increment\" : 5, \n    \"allowUserKeys\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionUsers\n\n{ \n  \"id\" : \"620288455\", \n  \"name\" : \"testCollectionUsers\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_collection" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.

" + } + }, + "summary": " Create collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}": { + "delete": { + "description": "\n\nDrops the collection identified by collection-name.
If the collection was successfully dropped, an object is returned with the following attributes:
  • error: false
  • id: The identifier of the dropped collection.

Example: Using an identifier:

shell> curl -X DELETE --dump - http://localhost:8529/_api/collection/620485063\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620485063\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a name:

shell> curl -X DELETE --dump - http://localhost:8529/_api/collection/products1\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620681671\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection to drop.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Drops collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "get": { + "description": "\n\nThe result is an object describing the collection with the following attributes:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • status: The status of the collection as number. - 1: new born collection - 2: unloaded - 3: loaded - 4: in the process of being unloaded - 5: deleted - 6: loading
Every other status indicates a corrupted collection.
  • type: The type of the collection as number. - 2: document collection (normal case) - 3: edges collection
  • isSystem: If true then the collection is a system collection.
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return information about a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/checksum": { + "get": { + "description": "\n\nWill calculate a checksum of the meta-data (keys and optionally revision ids) and optionally the document data in the collection.
The checksum can be used to compare if two collections on different ArangoDB instances contain the same contents. The current revision of the collection is returned too so one can make sure the checksums are calculated for the same state of data.
By default, the checksum will only be calculated on the _key system attribute of the documents contained in the collection. For edge collections, the system attributes _from and _to will also be included in the calculation.
By setting the optional URL parameter withRevisions to true, then revision ids (_rev system attributes) are included in the checksumming.
By providing the optional URL parameter withData with a value of true, the user-defined document attributes will be included in the calculation too. Note: Including user-defined attributes will make the checksumming slower.
The response is a JSON object with the following attributes:
  • checksum: The calculated checksum as a number.
  • revision: The collection revision id as a string.
Note: this method is not available in a cluster.

Example: Retrieving the checksum of a collection:

shell> curl --dump - http://localhost:8529/_api/collection/products/checksum\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620878279\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"checksum\" : 2335626498, \n  \"revision\" : \"621205959\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the checksum of a collection including the collection data, but not the revisions:

shell> curl --dump - http://localhost:8529/_api/collection/products/checksum?withRevisions=false&withData=true\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"621468103\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"checksum\" : 1042110547, \n  \"revision\" : \"621795783\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "description": "Whether or not to include document revision ids in the checksum calculation.
", + "in": "query", + "name": "withRevisions", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to include document body data in the checksum calculation.
", + "in": "query", + "name": "withData", + "required": false, + "type": "boolean" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return checksum for the collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/count": { + "get": { + "description": "\n\nIn addition to the above, the result also contains the number of documents. Note that this will always load the collection into memory.
  • count: The number of documents inside the collection.

Example: Requesting the number of documents:

shell> curl --dump - http://localhost:8529/_api/collection/products/count\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/count\n\n{ \n  \"id\" : \"622057927\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"count\" : 100, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return number of documents in a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/figures": { + "get": { + "description": "\n\nIn addition to the above, the result also contains the number of documents and additional statistical information about the collection. Note : This will always load the collection into memory.
Note: collection data that are stored in the write-ahead log only are not reported in the results. When the write-ahead log is collected, documents might be added to journals and datafiles of the collection, which may modify the figures of the collection.
Additionally, the filesizes of collection and index parameter JSON files are not reported. These files should normally have a size of a few bytes each. Please also note that the fileSize values are reported in bytes and reflect the logical file sizes. Some filesystems may use optimisations (e.g. sparse files) so that the actual physical file size is somewhat different. Directories and sub-directories may also require space in the file system, but this space is not reported in the fileSize results.
That means that the figures reported do not reflect the actual disk usage of the collection with 100% accuracy. The actual disk usage of a collection is normally slightly higher than the sum of the reported fileSize values. Still the sum of the fileSize values can still be used as a lower bound approximation of the disk usage.
**A json document with these Properties is returned:**
  • count: The number of documents currently present in the collection.
  • journalSize: The maximal size of a journal or datafile in bytes.
  • figures: metrics of the collection
    • datafiles: Metrics regarding the datafiles
      • count: The number of datafiles.
      • fileSize: The total filesize of datafiles (in bytes).
    • uncollectedLogfileEntries: The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
    • compactors:
      • count: The number of compactor files.
      • fileSize: The total filesize of all compactor files (in bytes).
    • dead: the items waiting to be swept away by the cleaner
      • count: The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure.
      • deletion: The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure.
      • size: The total size in bytes used by all dead documents.
    • indexes:
      • count: The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
      • size: The total memory allocated for indexes in bytes.
    • shapes:
      • count: The total number of shapes used in the collection. This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size of all shapes (in bytes). This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
    • alive: the currently active figures
      • count: The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
    • attributes:
      • count: The total number of attributes used in the collection. Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size of the attribute data (in bytes). Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
    • shapefiles: deprecated
      • count: The number of shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 since ArangoDB 2.0 and higher.
      • fileSize: The total filesize of the shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 in ArangoDB 2.0 and higher.
    • journals: Metrics regarding the journal files
      • count: The number of journal files.
      • fileSize: The total filesize of all journal files (in bytes).
    • maxTick: The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.

Example: Using an identifier and requesting the figures of the collection:

shell> curl --dump - http://localhost:8529/_api/collection/products/figures\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/figures\n\n{ \n  \"id\" : \"642111943\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : false, \n  \"indexBuckets\" : 8, \n  \"count\" : 1, \n  \"figures\" : { \n    \"alive\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"dead\" : { \n      \"count\" : 0, \n      \"size\" : 0, \n      \"deletion\" : 0 \n    }, \n    \"datafiles\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"journals\" : { \n      \"count\" : 1, \n      \"fileSize\" : 1048576 \n    }, \n    \"compactors\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"shapefiles\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"shapes\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"attributes\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"indexes\" : { \n      \"count\" : 1, \n      \"size\" : 16120 \n    }, \n    \"lastTick\" : \"642505159\", \n    \"uncollectedLogfileEntries\" : 1 \n  }, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Returns information about the collection:
", + "schema": { + "$ref": "#/definitions/JSA_get_api_collection_figures_rc_200" + }, + "x-description-offset": 1458 + }, + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return statistics for a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/load": { + "put": { + "description": "\n\nLoads a collection into memory. Returns the collection on success.
The request body object might optionally contain the following attribute:
  • count: If set, this controls whether the return value should include the number of documents in the collection. Setting count to false may speed up loading a collection. The default value for count is true.
On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • count: The number of documents inside the collection. This is only returned if the count input parameter is set to true or has not been specified.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/load\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644078023\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"count\" : 0, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Load collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/properties": { + "get": { + "description": "\n\nIn addition to the above, the result will always contain the waitForSync, doCompact, journalSize, and isVolatile attributes. This is achieved by forcing a load of the underlying collection.
  • waitForSync: If true then creating, changing or removing documents will wait until the data has been synchronized to disk.
  • doCompact: Whether or not the collection will be compacted.
  • journalSize: The maximal size setting for journals / datafiles in bytes.
  • keyOptions: JSON object which contains key generation options: - type: specifies the type of the key generator. The currently available generators are traditional and autoincrement. - allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator is solely responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
  • isVolatile: If true then the collection data will be kept in memory only and ArangoDB will not write or sync the data to disk.
In a cluster setup, the result will also contain the following attributes:
  • numberOfShards: the number of shards of the collection.
  • shardKeys: contains the names of document attributes that are used to determine the target shard for documents.

Example: Using an identifier:

shell> curl --dump - http://localhost:8529/_api/collection/643422663/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/properties\n\n{ \n  \"id\" : \"643422663\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a name:

shell> curl --dump - http://localhost:8529/_api/collection/products/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/properties\n\n{ \n  \"id\" : \"643619271\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Read properties of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "put": { + "description": "\n\nChanges the properties of a collection. Expects an object with the attribute(s)
  • waitForSync: If true then creating or changing a document will wait until the data has been synchronized to disk.
  • journalSize: The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected.
On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • waitForSync: The new value.
  • journalSize: The new value.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.
  • isVolatile: If true then the collection data will be kept in memory only and ArangoDB will not write or sync the data to disk.
  • doCompact: Whether or not the collection will be compacted.
  • keyOptions: JSON object which contains key generation options: - type: specifies the type of the key generator. The currently available generators are traditional and autoincrement. - allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator is solely responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
Note: some other collection properties, such as type, isVolatile, numberOfShards or shardKeys cannot be changed once a collection is created.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/properties <<EOF\n{ \n  \"waitForSync\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644340167\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Change properties of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/rename": { + "put": { + "description": "\n\nRenames a collection. Expects an object with the attribute(s)
  • name: The new name.
It returns an object with the attributes
  • id: The identifier of the collection.
  • name: The new name of the collection.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products1/rename <<EOF\n{ \n  \"name\" : \"newname\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644602311\", \n  \"name\" : \"newname\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection to rename.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned." + } + }, + "summary": " Rename collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/revision": { + "get": { + "description": "\n\nIn addition to the above, the result will also contain the collection's revision id. The revision id is a server-generated string that clients can use to check whether data in a collection has changed since the last revision check.
  • revision: The collection revision id as a string.

Example: Retrieving the revision of a collection

shell> curl --dump - http://localhost:8529/_api/collection/products/revision\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"643815879\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"revision\" : \"0\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return collection revision id", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/rotate": { + "put": { + "description": "\n\nRotates the journal of a collection. The current journal of the collection will be closed and made a read-only datafile. The purpose of the rotate method is to make the data in the file available for compaction (compaction is only performed for read-only datafiles, and not for journals).
Saving new data in the collection subsequently will create a new journal file automatically if there is no current journal.
It returns an object with the attributes
  • result: will be true if rotation succeeded
Note: This method is not available in a cluster.

Example: Rotating the journal:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF\n{ \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Rotating if no journal exists:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF\n{ \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1105, \n  \"errorMessage\" : \"could not rotate journal: no journal\" \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection currently has no journal, HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Rotate journal of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/truncate": { + "put": { + "description": "\n\nRemoves all documents from the collection, but leaves the indexes intact.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/truncate\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644864455\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Truncate collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/unload": { + "put": { + "description": "\n\nRemoves a collection from memory. This call does not delete any documents. You can use the collection afterwards; in which case it will be loaded into memory, again. On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/unload\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"645126599\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 2, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Unload collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/cursor": { + "post": { + "description": "**A json post document with these Properties is required:**
  • count: indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result. Calculating the \"count\" attribute might in the future have a performance impact for some queries so this option is turned off by default, and \"count\" is only returned when requested.
  • ttl: The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
  • batchSize: maximum number of result documents to be transferred from the server to the client in one roundtrip. If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
  • cache: flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup will be skipped for the query. If set to true, it will lead to the query cache being checked for the query if the query cache mode is either on or demand.
  • bindVars: list of bind parameter objects. of type object
  • query: contains the query string to be executed
  • options: key/value object with extra options for the query.
    • profile: if set to true, then the additional query profiling information will be returned in the extra.stats return attribute if the query result is not served from the query cache.
    • optimizer.rules: a list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules. of type string
    • fullCount: if set to true and the query contains a LIMIT clause, then the result will contain an extra attribute extra with a sub-attribute fullCount. This sub-attribute will contain the number of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and thus make queries run longer. Note that the fullCount sub-attribute will only be present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the query.
    • maxPlans: limits the maximum number of plans that are created by the AQL query optimizer.
\n\nThe query details include the query string plus optional query options and bind parameters. These values need to be passed in a JSON representation in the body of the POST request.
**A json document with these Properties is returned:**
  • count: the total number of result documents available (only available if the query was executed with the count attribute set)
  • code: the HTTP status code
  • extra: an optional JSON object with extra information about the query result contained in its stats sub-attribute. For data-modification queries, the extra.stats sub-attribute will contain the number of modified documents and the number of documents that could not be modified due to an error (if ignoreErrors query option is specified)
  • cached: a boolean flag indicating whether the query result was served from the query cache or not. If the query result is served from the query cache, the extra return attribute will not contain any stats sub-attribute and no profile sub-attribute.
  • hasMore: A boolean indicator whether there are more results available for the cursor on the server
  • result: an array of result documents (might be empty if query has no results) anonymous json object
  • error: A flag to indicate that an error occurred (false in this case)
  • id: id of temporary cursor created on the server (optional, see above)
  • errorMessage: a descriptive error message
    If the query specification is complete, the server will process the query. If an error occurs during query processing, the server will respond with HTTP 400. Again, the body of the response will contain details about the error.
    A list of query errors can be found here (../ArangoErrors/README.md).

  • errorNum: the server error number
  • code: the HTTP status code
  • error: boolean flag to indicate that an error occurred (true in this case)

Example: Execute a query and extract the result in a single go

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 2 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello2\" : \"world1\", \n      \"_id\" : \"products/648862151\", \n      \"_rev\" : \"648862151\", \n      \"_key\" : \"648862151\" \n    }, \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/648534471\", \n      \"_rev\" : \"648534471\", \n      \"_key\" : \"648534471\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 2, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a query and extract a part of the result

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/647223751\", \n      \"_rev\" : \"647223751\", \n      \"_key\" : \"647223751\" \n    }, \n    { \n      \"hello5\" : \"world1\", \n      \"_id\" : \"products/647879111\", \n      \"_rev\" : \"647879111\", \n      \"_key\" : \"647879111\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"648075719\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Using the query option \"fullCount\"

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..1000 FILTER i > 500 LIMIT 10 RETURN i\", \n  \"count\" : true, \n  \"options\" : { \n    \"fullCount\" : true \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    501, \n    502, \n    503, \n    504, \n    505, \n    506, \n    507, \n    508, \n    509, \n    510 \n  ], \n  \"hasMore\" : false, \n  \"count\" : 10, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 500, \n      \"fullCount\" : 500 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Enabling and disabling optimizer rules

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 LET a = 1 LET b = 2 FILTER a + b == 3 RETURN i\", \n  \"count\" : true, \n  \"options\" : { \n    \"maxPlans\" : 1, \n    \"optimizer\" : { \n      \"rules\" : [ \n        \"-all\", \n        \"+remove-unnecessary-filters\" \n      ] \n    } \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    1, \n    2, \n    3, \n    4, \n    5, \n    6, \n    7, \n    8, \n    9, \n    10 \n  ], \n  \"hasMore\" : false, \n  \"count\" : 10, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a data-modification query and retrieve the number of modified documents

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products REMOVE p IN products\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ ], \n  \"hasMore\" : false, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 2, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 2, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a data-modification query with option ignoreErrors

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"REMOVE 'bar' IN products OPTIONS { ignoreErrors: true }\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ ], \n  \"hasMore\" : false, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 1, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Bad query - Missing body

shell> curl -X POST --dump - http://localhost:8529/_api/cursor\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting atom, got end-of-file\", \n  \"code\" : 400, \n  \"errorNum\" : 600 \n}\n

\n
Example: Bad query - Unknown collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR u IN unknowncoll LIMIT 2 RETURN u\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection not found (unknowncoll)\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Bad query - Execute a data-modification query that attempts to remove a non-existing document

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"REMOVE 'foo' IN products\" \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found (while executing)\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor" + }, + "x-description-offset": 59 + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "is returned if the result set can be created by the server.
", + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor_rc_201" + }, + "x-description-offset": 300 + }, + "400": { + "description": "is returned if the JSON representation is malformed or the query specification is missing from the request.
In this case, the server will respond with HTTP 400.
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
", + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor_rc_400" + }, + "x-description-offset": 354 + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + } + }, + "summary": " Create cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + } + }, + "/_api/cursor/{cursor-identifier}": { + "delete": { + "description": "\n\nDeletes the cursor and frees the resources associated with it.
The cursor will automatically be destroyed on the server when the client has retrieved all documents from it. The client can also explicitly destroy the cursor at any earlier time using an HTTP DELETE request. The cursor id must be included as part of the URL.
Note: the server will also destroy abandoned cursors automatically after a certain server-controlled timeout to avoid resource leakage.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/650172871\", \n      \"_rev\" : \"650172871\", \n      \"_key\" : \"650172871\" \n    }, \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/649517511\", \n      \"_rev\" : \"649517511\", \n      \"_key\" : \"649517511\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"651024839\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/cursor/651024839\n\n

\n
", + "parameters": [ + { + "description": "The id of the cursor
", + "format": "string", + "in": "path", + "name": "cursor-identifier", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "is returned if the server is aware of the cursor.
" + }, + "404": { + "description": "is returned if the server is not aware of the cursor. It is also returned if a cursor is used after it has been destroyed.
" + } + }, + "summary": " Delete cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "put": { + "description": "\n\n
If the cursor is still alive, returns an object with the following attributes:
  • id: the cursor-identifier
  • result: a list of documents for the current batch
  • hasMore: false if this was the last batch
  • count: if present the total number of elements
Note that even if hasMore returns true, the next call might still return no documents. If, however, hasMore is false, then the cursor is exhausted. Once the hasMore attribute has a value of false, the client can stop.

Example: Valid request for next batch

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/cursor/655481287\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/653973959\", \n      \"_rev\" : \"653973959\", \n      \"_key\" : \"653973959\" \n    }, \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/654629319\", \n      \"_rev\" : \"654629319\", \n      \"_key\" : \"654629319\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"655481287\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Missing identifier

shell> curl -X PUT --dump - http://localhost:8529/_api/cursor\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting PUT /_api/cursor/<cursor-id>\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
Example: Unknown identifier

shell> curl -X PUT --dump - http://localhost:8529/_api/cursor/123123\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cursor not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1600 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the cursor
", + "format": "string", + "in": "path", + "name": "cursor-identifier", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 in case of success.
" + }, + "400": { + "description": "If the cursor identifier is omitted, the server will respond with HTTP 404.
" + }, + "404": { + "description": "If no cursor with the specified identifier can be found, the server will respond with HTTP 404.
" + } + }, + "summary": " Read next batch from cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + } + }, + "/_api/database": { + "get": { + "description": "\n\nRetrieves the list of all existing databases
Note: retrieving the list of databases is only possible from within the _system database.

Example:

shell> curl --dump - http://localhost:8529/_api/database\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    \"_system\" \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the list of database was compiled successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + } + }, + "summary": " List of databases", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • username: The user name as a string.
  • users: Has to be a list of user objects to initially create for the new database. If users is not specified or does not contain any users, a default user root will be created with an empty string password. This ensures that the new database will be accessible after it is created. Each user object can contain the following attributes: \n
    • username: Loginname of the user to be created
    • passwd: Password for the user
    • active: if false, the user won't be able to log into the database.
  • extra: A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
  • passwd: The user password as a string. If not specified, it will default to an empty string.
  • active: A Flag indicating whether the user account should be activated or not. The default value is true.
  • name: Has to contain a valid database name.
\n\nCreates a new database
The response is a JSON object with the attribute result set to true.
Note: creating a new database is only possible from within the _system database.

Example: Creating a database named example.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"example\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a database named mydb with two users.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"mydb\", \n  \"users\" : [ \n    { \n      \"username\" : \"admin\", \n      \"passwd\" : \"secret\", \n      \"active\" : true \n    }, \n    { \n      \"username\" : \"tester\", \n      \"passwd\" : \"test001\", \n      \"active\" : false \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_get_api_database_new" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the database was created successfully.
" + }, + "400": { + "description": "is returned if the request parameters are invalid or if a database with the specified name already exists.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + }, + "409": { + "description": "is returned if a database with the specified name already exists.
" + } + }, + "summary": " Create database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/current": { + "get": { + "description": "\n\nRetrieves information about the current database
The response is a JSON object with the following attributes:
  • name: the name of the current database
  • id: the id of the current database
  • path: the filesystem path of the current database
  • isSystem: whether or not the current database is the _system database

Example:

shell> curl --dump - http://localhost:8529/_api/database/current\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"name\" : \"_system\", \n    \"id\" : \"121287\", \n    \"path\" : \"/tmp/vocdir.2239/databases/database-121287\", \n    \"isSystem\" : true \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the information was retrieved successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + }, + "404": { + "description": "is returned if the database could not be found.
" + } + }, + "summary": " Information of the database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/user": { + "get": { + "description": "\n\nRetrieves the list of all databases the current user can access without specifying a different username or password.

Example:

shell> curl --dump - http://localhost:8529/_api/database/user\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    \"_system\" \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the list of database was compiled successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + } + }, + "summary": " List of accessible databases ", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/{database-name}": { + "delete": { + "description": "\n\nDrops the database along with all data stored in it.
Note: dropping a database is only possible from within the _system database. The _system database itself cannot be dropped.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/database/example\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the database
", + "format": "string", + "in": "path", + "name": "database-name", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the database was dropped successfully.
" + }, + "400": { + "description": "is returned if the request is malformed.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + }, + "404": { + "description": "is returned if the database could not be found.
" + } + }, + "summary": " Drop database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/document": { + "get": { + "description": "\n\nReturns an array of all keys, ids, or URI paths for all documents in the collection identified by collection. The type of the result array is determined by the type attribute.
Note that the results have no defined order and thus the order should not be relied on.

Example: Return all document paths

shell> curl --dump - http://localhost:8529/_api/document/?collection=products\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    \"/_db/_system/_api/document/products/711580103\", \n    \"/_db/_system/_api/document/products/712235463\", \n    \"/_db/_system/_api/document/products/711907783\" \n  ] \n}\n

\n
Example: Return all document keys

shell> curl --dump - http://localhost:8529/_api/document/?collection=products&type=key\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    \"710662599\", \n    \"710334919\", \n    \"710990279\" \n  ] \n}\n

\n
Example: Collection does not exist

shell> curl --dump - http://localhost:8529/_api/document/?collection=doesnotexist\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'doesnotexist' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "The type of the result. The following values are allowed:
  • id: returns an array of document ids (_id attributes)
  • key: returns an array of document keys (_key attributes)
  • path: returns an array of document URI paths. This is the default.
", + "in": "query", + "name": "type", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "All went good.
" + }, + "404": { + "description": "The collection does not exist.
" + } + }, + "summary": "Read all documents", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "post": { + "description": "free style json body\n\nCreates a new document in the collection named collection. A JSON representation of the document must be passed as the body of the POST request.
If the document was created successfully, then the \"Location\" header contains the path to the newly created document. The \"ETag\" header field contains the revision of the document.
The body of the response contains a JSON object with the following attributes:
  • _id contains the document handle of the newly created document
  • _key contains the document key
  • _rev contains the document revision
If the collection parameter waitForSync is false, then the call returns as soon as the document has been accepted. It will not wait until the document has been synced to disk.
Optionally, the URL parameter waitForSync can be used to force synchronization of the document creation operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just this specific operation. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

Example: Create a document in a collection named products. Note that the revision identifier might or might not be equal to the auto-generated key.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\netag: \"708172231\"\nlocation: /_db/_system/_api/document/products/708172231\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/708172231\", \n  \"_rev\" : \"708172231\", \n  \"_key\" : \"708172231\" \n}\n

\n
Example: Create a document in a collection named products with a collection-level waitForSync value of false.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"707647943\"\nlocation: /_db/_system/_api/document/products/707647943\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/707647943\", \n  \"_rev\" : \"707647943\", \n  \"_key\" : \"707647943\" \n}\n

\n
Example: Create a document in a collection with a collection-level waitForSync value of false, but using the waitForSync URL parameter.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products&waitForSync=true <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\netag: \"709220807\"\nlocation: /_db/_system/_api/document/products/709220807\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/709220807\", \n  \"_rev\" : \"709220807\", \n  \"_key\" : \"709220807\" \n}\n

\n
Example: Create a document in a new, named collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products&createCollection=true <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"708696519\"\nlocation: /_db/_system/_api/document/products/708696519\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/708696519\", \n  \"_rev\" : \"708696519\", \n  \"_key\" : \"708696519\" \n}\n

\n
Example: Unknown collection name

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Illegal document

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ 1: \"World\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting attribute name\", \n  \"code\" : 400, \n  \"errorNum\" : 600 \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the document.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of true or yes, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
Note: this flag is not supported in a cluster. Using it will result in an error.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if the document was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": "Create document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + } + }, + "/_api/document/{document-handle}": { + "delete": { + "description": "\n\nThe body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the removed document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

Example: Using document handle:

shell> curl -X DELETE --dump - http://localhost:8529/_api/document/products/700832199\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/700832199\", \n  \"_rev\" : \"700832199\", \n  \"_key\" : \"700832199\" \n}\n

\n
Example: Unknown document handle:

shell> curl -X DELETE --dump - http://localhost:8529/_api/document/products/702994887\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n
Example: Revision conflict:

shell> curl -X DELETE --header 'If-Match: \"702339527\"' --dump - http://localhost:8529/_api/document/products/702011847\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"702011847\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/702011847\", \n  \"_rev\" : \"702011847\", \n  \"_key\" : \"702011847\" \n}\n

\n
", + "parameters": [ + { + "description": "Removes the document identified by document-handle.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally remove a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing documents (see replacing documents for more details).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "Wait until deletion operation has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally remove a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was removed successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was removed successfully and waitForSync was false.
" + }, + "404": { + "description": "is returned if the collection or the document was not found. The response body contains an error document in this case.
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Removes a document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "get": { + "description": "\n\nReturns the document identified by document-handle. The returned document contains three special attributes: _id containing the document handle, _key containing key which uniquely identifies a document in a given collection and _rev containing the revision.

Example: Use a document handle:

shell> curl --dump - http://localhost:8529/_api/document/products/709745095\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"709745095\"\n\n{ \n  \"hello\" : \"world\", \n  \"_id\" : \"products/709745095\", \n  \"_rev\" : \"709745095\", \n  \"_key\" : \"709745095\" \n}\n

\n
Example: Use a document handle and an etag:

shell> curl --header 'If-None-Match: \"713415111\"' --dump - http://localhost:8529/_api/document/products/713415111\n\n

\n
Example: Unknown document handle:

shell> curl --dump - http://localhost:8529/_api/document/products/unknownhandle\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
", + "parameters": [ + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. The document is returned, if it has a different revision than the given etag. Otherwise an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one etag. The document is returned, if it has the same revision as the given etag. Otherwise a HTTP 412 is returned. As an alternative you can supply the etag in an attribute rev in the URL.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the document has the same version
" + }, + "404": { + "description": "is returned if the document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "Read document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "head": { + "description": "\n\nLike GET, but only returns the header fields and not the body. You can use this call to get the current revision of a document or check if the document was deleted.

Example:

shell> curl -X HEAD --dump - http://localhost:8529/_api/document/products/712825287\n\n

\n

", + "parameters": [ + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally fetch a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. If the current document revision is different to the specified etag, an HTTP 200 response is returned. If the current document revision is identical to the specified etag, then an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "You can conditionally fetch a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the document has same version
" + }, + "404": { + "description": "is returned if the document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the etag header.
" + } + }, + "summary": "Read document header", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "patch": { + "description": "free style json body\n\nPartially updates the document identified by document-handle. The body of the request must contain a JSON document with the attributes to patch (the patch document). All attributes from the patch document will be added to the existing document if they do not yet exist, and overwritten in the existing document if they do exist there.
Setting an attribute value to null in the patch document will cause a value of null to be saved for the attribute by default.
Optionally, the URL parameter waitForSync can be used to force synchronization of the document update operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
You can conditionally update a document based on a target revision id by using either the rev URL parameter or the if-match HTTP header. To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing documents (see replacing documents for details).

Example: patches an existing document with new content.

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855 <<EOF\n{ \n  \"hello\" : \"world\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"704174535\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704174535\", \n  \"_key\" : \"703846855\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855 <<EOF\n{ \n  \"numbers\" : { \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"empty\" : null \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"704764359\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704764359\", \n  \"_key\" : \"703846855\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/703846855\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"704764359\"\n\n{ \n  \"one\" : \"world\", \n  \"hello\" : \"world\", \n  \"numbers\" : { \n    \"empty\" : null, \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3 \n  }, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704764359\", \n  \"_key\" : \"703846855\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855?keepNull=false <<EOF\n{ \n  \"hello\" : null, \n  \"numbers\" : { \n    \"four\" : 4 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"705223111\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"705223111\", \n  \"_key\" : \"703846855\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/703846855\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: 
\"705223111\"\n\n{ \n  \"one\" : \"world\", \n  \"numbers\" : { \n    \"empty\" : null, \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"four\" : 4 \n  }, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"705223111\", \n  \"_key\" : \"703846855\" \n}\n

\n
Example: Merging attributes of an object using `mergeObjects`:

shell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"706075079\"\n\n{ \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"706075079\", \n  \"_key\" : \"706075079\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/706075079?mergeObjects=true <<EOF\n{ \n  \"inhabitants\" : { \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  } \n}\nEOF\n\nshell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"706599367\"\n\n{ \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000, \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"706599367\", \n  \"_key\" : \"706075079\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/706075079?mergeObjects=false <<EOF\n{ \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"707058119\"\nlocation: /_db/_system/_api/document/products/706075079\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"707058119\", \n  \"_key\" : \"706075079\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"707058119\"\n\n{ \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"707058119\", \n  \"_key\" : \"706075079\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the document update.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the intention is to delete existing attributes with the patch command, the URL query parameter keepNull can be used with a value of false. This will modify the behavior of the patch command to remove any attributes from the existing document that are contained in the patch document with an attribute value of null.
", + "in": "query", + "name": "keepNull", + "required": false, + "type": "boolean" + }, + { + "description": "Controls whether objects (not arrays) will be merged if present in both the existing and the patch document. If set to false, the value in the patch document will overwrite the existing document's value. If set to true, objects will be merged. The default is true.
", + "in": "query", + "name": "mergeObjects", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally patch a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter.
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally patch a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Patch document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "put": { + "description": "free style json body\n\nCompletely updates (i.e. replaces) the document identified by document-handle. If the document exists and can be updated, then a HTTP 201 is returned and the \"ETag\" header field contains the new revision of the document.
If the new document passed in the body of the request contains the document-handle in the attribute _id and the revision in _rev, these attributes will be ignored. Only the URI and the \"ETag\" header are relevant in order to avoid confusion when using proxies.

Optionally, the URL parameter waitForSync can be used to force synchronization of the document replacement operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
There are two ways for specifying the targeted document revision id for conditional replacements (i.e. replacements that will only be executed if the revision id found in the database matches the document revision id specified in the request):
  • specifying the target revision in the rev URL query parameter
  • specifying the target revision in the if-match HTTP header
    Specifying a target revision is optional, however, if done, only one of the described mechanisms must be used (either the rev URL parameter or the if-match HTTP header). Regardless which mechanism is used, the parameter needs to contain the target document revision id as returned in the _rev attribute of a document or by an HTTP etag header.
For example, to conditionally replace a document based on a specific revision id, you can use the following request:

`PUT /_api/document/document-handle?rev=etag`

If a target revision id is provided in the request (e.g. via the etag value in the rev URL query parameter above), ArangoDB will check that the revision id of the document found in the database is equal to the target revision id provided in the request. If there is a mismatch between the revision id, then by default a HTTP 412 conflict is returned and no replacement is performed.

The conditional update behavior can be overridden with the policy URL query parameter:

`PUT /_api/document/document-handle?policy=policy`

If policy is set to error, then the behavior is as before: replacements will fail if the revision id found in the database does not match the target revision id specified in the request.
If policy is set to last, then the replacement will succeed, even if the revision id found in the database does not match the target revision id specified in the request. You can use the *last* policy to force replacements.

Example: Using a document handle

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/714004935 <<EOF\n{\"Hello\": \"you\"}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"714332615\"\nlocation: /_db/_system/_api/document/products/714004935\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/714004935\", \n  \"_rev\" : \"714332615\", \n  \"_key\" : \"714004935\" \n}\n

\n
Example: Unknown document handle

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/718199239 <<EOF\n{}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n
Example: Produce a revision conflict

shell> curl -X PUT --header 'If-Match: \"715184583\"' --data-binary @- --dump - http://localhost:8529/_api/document/products/714856903 <<EOF\n{\"other\":\"content\"}\nEOF\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"714856903\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/714856903\", \n  \"_rev\" : \"714856903\", \n  \"_key\" : \"714856903\" \n}\n

\n
Example: Last write wins

shell> curl -X PUT --header 'If-Match: \"716298695\"' --data-binary @- --dump - http://localhost:8529/_api/document/products/715971015?policy=last <<EOF\n{}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"716560839\"\nlocation: /_db/_system/_api/document/products/715971015\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/715971015\", \n  \"_rev\" : \"716560839\", \n  \"_key\" : \"715971015\" \n}\n

\n
Example: Alternative to header fields

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/717085127?rev=717412807 <<EOF\n{\"other\":\"content\"}\nEOF\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"717085127\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/717085127\", \n  \"_rev\" : \"717085127\", \n  \"_key\" : \"717085127\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the new document.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally replace a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter (see below).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally replace a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was replaced successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was replaced successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "Replace document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + } + }, + "/_api/edge": { + "get": { + "description": "\n\nReturns an array of all URIs for all edges from the collection identified by collection.
", + "parameters": [ + { + "description": "The name of the collection.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "All went good.
" + }, + "404": { + "description": "The collection does not exist.
" + } + }, + "summary": " Read all edges from collection", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "post": { + "description": "free style json body\n\nCreates a new edge document in the collection named collection. A JSON representation of the document must be passed as the body of the POST request.
The from and to handles are immutable once the edge has been created.
In all other respects the method works like POST /document.

Example: Create an edge and read it back:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/edge/?collection=edges&from=vertices/1&to=vertices/2 <<EOF\n{ \n  \"name\" : \"Emil\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"721082823\"\nlocation: /_db/_system/_api/edge/edges/721082823\n\n{ \n  \"error\" : false, \n  \"_id\" : \"edges/721082823\", \n  \"_rev\" : \"721082823\", \n  \"_key\" : \"721082823\" \n}\nshell> curl --dump - http://localhost:8529/_api/edge/edges/721082823\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"721082823\"\n\n{ \n  \"name\" : \"Emil\", \n  \"_id\" : \"edges/721082823\", \n  \"_rev\" : \"721082823\", \n  \"_key\" : \"721082823\", \n  \"_from\" : \"vertices/1\", \n  \"_to\" : \"vertices/2\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the edge document must be passed as the body of the POST request. This JSON object may contain the edge's document key in the _key attribute if needed.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "Creates a new edge in the collection identified by collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of true or yes, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
Note: This flag is not supported in a cluster. Using it will result in an error.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until the edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "The document handle of the start point must be passed in from handle.
", + "in": "query", + "name": "from", + "required": true, + "type": "string" + }, + { + "description": "The document handle of the end point must be passed in to handle.
", + "in": "query", + "name": "to", + "required": true, + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the edge was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of an edge, or if the collection specified is not an edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": "Create edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/edge/{document-handle}": { + "delete": { + "description": "\n\nThe body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the deleted edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
", + "parameters": [ + { + "description": "Deletes the edge document identified by document-handle.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally delete an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing edge documents (see replacing edge documents for more details).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally delete an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge document was deleted successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge document was deleted successfully and waitForSync was false.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found. The response body contains an error document in this case.
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Deletes edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "get": { + "description": "\n\nReturns the edge identified by document-handle. The returned edge contains a few special attributes:
  • _id contains the document handle
  • _rev contains the revision
  • _from and _to contain the document handles of the connected vertex documents
", + "parameters": [ + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. The edge is returned if it has a different revision than the given etag. Otherwise an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one etag. The edge is returned if it has the same revision ad the given etag. Otherwise a HTTP 412 is returned. As an alternative you can supply the etag in an attribute rev in the URL.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the edge has the same version
" + }, + "404": { + "description": "is returned if the edge or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Read edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "head": { + "description": "\n\nLike GET, but only returns the header fields and not the body. You can use this call to get the current revision of an edge document or check if it was deleted.
", + "parameters": [ + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally fetch an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. If the current document revision is different to the specified etag, an HTTP 200 response is returned. If the current document revision is identical to the specified etag, then an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "You can conditionally fetch an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the edge document has same version
" + }, + "404": { + "description": "is returned if the edge document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the etag header.
" + } + }, + "summary": " Read edge header", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "patch": { + "description": "free style json body\n\nPartially updates the edge document identified by document-handle. The body of the request must contain a JSON document with the attributes to patch (the patch document). All attributes from the patch document will be added to the existing edge document if they do not yet exist, and overwritten in the existing edge document if they do exist there.
Setting an attribute value to null in the patch document will cause a value of null to be saved for the attribute by default.
Note: Internal attributes such as _key, _from and _to are immutable once set and cannot be updated.
Optionally, the URL parameter waitForSync can be used to force synchronization of the edge document update operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the edge document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
You can conditionally update an edge document based on a target revision id by using either the rev URL parameter or the if-match HTTP header. To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing edge documents (see replacing documents for details).
", + "parameters": [ + { + "description": "A JSON representation of the edge update.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the intention is to delete existing attributes with the patch command, the URL query parameter keepNull can be used with a value of false. This will modify the behavior of the patch command to remove any attributes from the existing edge document that are contained in the patch document with an attribute value of null.
", + "in": "query", + "name": "keepNull", + "required": false, + "type": "boolean" + }, + { + "description": "Controls whether objects (not arrays) will be merged if present in both the existing and the patch edge. If set to false, the value in the patch edge will overwrite the existing edge's value. If set to true, objects will be merged. The default is true.
", + "in": "query", + "name": "mergeObjects", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally patch an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter.
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally patch an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was patched successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was patched successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation or when applied on an non-edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Patches edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "put": { + "description": "free style json body\n\nCompletely updates (i.e. replaces) the edge document identified by document-handle. If the edge document exists and can be updated, then a HTTP 201 is returned and the \"ETag\" header field contains the new revision of the edge document.
If the new edge document passed in the body of the request contains the document-handle in the attribute _id and the revision in _rev, these attributes will be ignored. Only the URI and the \"ETag\" header are relevant in order to avoid confusion when using proxies. Note: The attributes _from and _to of an edge are immutable and cannot be updated either.
Optionally, the URL parameter waitForSync can be used to force synchronization of the edge document replacement operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the edge document does not exist, then an HTTP 404 is returned and the body of the response contains an error document.
There are two ways for specifying the targeted revision id for conditional replacements (i.e. replacements that will only be executed if the revision id found in the database matches the revision id specified in the request):
  • specifying the target revision in the rev URL query parameter
  • specifying the target revision in the if-match HTTP header
Specifying a target revision is optional, however, if done, only one of the described mechanisms must be used (either the rev URL parameter or the if-match HTTP header). Regardless which mechanism is used, the parameter needs to contain the target revision id as returned in the _rev attribute of an edge document or by an HTTP etag header.
For example, to conditionally replace an edge document based on a specific revision id, you can use the following request:
  • PUT /_api/document/document-handle?rev=etag
If a target revision id is provided in the request (e.g. via the etag value in the rev URL query parameter above), ArangoDB will check that the revision id of the edge document found in the database is equal to the target revision id provided in the request. If there is a mismatch between the revision id, then by default a HTTP 412 conflict is returned and no replacement is performed.
The conditional update behavior can be overridden with the policy URL query parameter:
  • PUT /_api/document/document-handle?policy=policy
If policy is set to error, then the behavior is as before: replacements will fail if the revision id found in the database does not match the target revision id specified in the request.
If policy is set to last, then the replacement will succeed, even if the revision id found in the database does not match the target revision id specified in the request. You can use the *last* policy to force replacements.
", + "parameters": [ + { + "description": "A JSON representation of the new edge data.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally replace an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter (see below).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally replace an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the edge document was replaced successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge document was replaced successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of an edge document or if applied to a non-edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "replaces an edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/edges/{collection-id}": { + "get": { + "description": "\n\nReturns an array of edges starting or ending in the vertex identified by vertex-handle.

Example: Any direction

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/6\", \n      \"_key\" : \"6\", \n      \"_rev\" : \"725211591\", \n      \"_from\" : \"vertices/2\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v2 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/7\", \n      \"_key\" : \"7\", \n      \"_rev\" : \"725735879\", \n      \"_from\" : \"vertices/4\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v4 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/5\", \n      \"_key\" : \"5\", \n      \"_rev\" : \"724687303\", \n      \"_from\" : \"vertices/1\", \n      \"_to\" : \"vertices/3\", \n      \"$label\" : \"v1 -> v3\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: In edges

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=in\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/6\", \n      \"_key\" : \"6\", \n      \"_rev\" : \"729930183\", \n      \"_from\" : \"vertices/2\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v2 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/7\", \n      \"_key\" : \"7\", \n      \"_rev\" : \"730454471\", \n      \"_from\" : \"vertices/4\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v4 -> v1\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Out edges

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=out\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/5\", \n      \"_key\" : \"5\", \n      \"_rev\" : \"734124487\", \n      \"_from\" : \"vertices/1\", \n      \"_to\" : \"vertices/3\", \n      \"$label\" : \"v1 -> v3\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the collection.
", + "format": "string", + "in": "path", + "name": "collection-id", + "required": true, + "type": "string" + }, + { + "description": "The id of the start vertex.
", + "in": "query", + "name": "vertex", + "required": true, + "type": "string" + }, + { + "description": "Selects in or out direction for edges. If not set, any edges are returned.
", + "in": "query", + "name": "direction", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge collection was found and edges were retrieved.
" + }, + "400": { + "description": "is returned if the request contains invalid parameters.
" + }, + "404": { + "description": "is returned if the edge collection was not found.
" + } + }, + "summary": " Read in- or outbound edges", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/endpoint": { + "get": { + "description": "\n\nReturns an array of all configured endpoints the server is listening on. For each endpoint, the array of allowed databases is returned too if set.
The result is a JSON object which has the endpoints as keys, and an array of mapped database names as values for each endpoint.
If an array of mapped databases is empty, it means that all databases can be accessed via the endpoint. If an array of mapped databases contains more than one database name, this means that any of the databases might be accessed via the endpoint, and the first database in the array will be treated as the default database for the endpoint. The default database will be used when an incoming request does not specify a database name in the request explicitly.
Note: retrieving the array of all endpoints is allowed in the system database only. Calling this action in any other database will make the server return an error.

Example:

shell> curl --dump - http://localhost:8529/_api/endpoint\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"endpoint\" : \"tcp://127.0.0.1:32239\", \n    \"databases\" : [ ] \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned when the array of endpoints can be determined successfully.
" + }, + "400": { + "description": "is returned if the action is not carried out in the system database.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + } + }, + "summary": " Return list of all endpoints", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/explain": { + "post": { + "description": "**A json post document with these Properties is required:**
  • query: the query which you want explained; If the query references any bind variables, these must also be passed in the attribute bindVars. Additional options for the query can be passed in the options attribute.
  • options: Options for the query
    • optimizer.rules: an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules. of type string
    • maxNumberOfPlans: an optional maximum number of plans that the optimizer is allowed to generate. Setting this attribute to a low value allows to put a cap on the amount of work the optimizer does.
    • allPlans: if set to true, all possible execution plans will be returned. The default is false, meaning only the optimal plan will be returned.
  • bindVars: key/value pairs representing the bind values of type object
\n\n
To explain how an AQL query would be executed on the server, the query string can be sent to the server via an HTTP POST request. The server will then validate the query and create an execution plan for it. The execution plan will be returned, but the query will not be executed.
The execution plan that is returned by the server can be used to estimate the probable performance of the query. Though the actual performance will depend on many different factors, the execution plan normally can provide some rough estimates on the amount of work the server needs to do in order to actually run the query.
By default, the explain operation will return the optimal plan as chosen by the query optimizer. The optimal plan is the plan with the lowest total estimated cost. The plan will be returned in the attribute plan of the response object. If the option allPlans is specified in the request, the result will contain all plans created by the optimizer. The plans will then be returned in the attribute plans.
The result will also contain an attribute warnings, which is an array of warnings that occurred during optimization or execution plan creation. Additionally, a stats attribute is contained in the result with some optimizer statistics.
Each plan in the result is a JSON object with the following attributes:
  • nodes: the array of execution nodes of the plan. The array of available node types can be found [here](../Aql/Optimizer.html)
  • estimatedCost: the total estimated cost for the plan. If there are multiple plans, the optimizer will choose the plan with the lowest total cost.
  • collections: an array of collections used in the query
  • rules: an array of rules the optimizer applied. An overview of the available rules can be found [here](../Aql/Optimizer.html)
  • variables: array of variables used in the query (note: this may contain internal variables created by the optimizer)

Example: Valid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products RETURN p\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"EnumerateCollectionNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 11, \n        \"estimatedNrItems\" : 10, \n        \"database\" : \"_system\", \n        \"collection\" : \"products\", \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        }, \n        \"random\" : false \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 21, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      } \n    ], \n    \"rules\" : [ ], \n    \"collections\" : [ \n      { \n        \"name\" : \"products\", \n        \"type\" : \"read\" \n      } \n    ], \n    \"variables\" : [ \n      { \n        \"id\" : 0, \n        \"name\" : \"p\" \n      } \n    ], \n    \"estimatedCost\" : 21, \n    \"estimatedNrItems\" : 10 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: A plan with some optimizer rules applied

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"IndexRangeNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 11, \n        \"estimatedCost\" : 11, \n        \"estimatedNrItems\" : 10, \n        \"database\" : \"_system\", \n        \"collection\" : \"products\", \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        }, \n        \"ranges\" : [ \n          [ ] \n        ], \n        \"index\" : { \n          \"type\" : \"skiplist\", \n          \"id\" : \"737008071\", \n          \"unique\" : false, \n          \"sparse\" : false, \n          \"fields\" : [ \n            \"id\" \n          ] \n        }, \n        \"reverse\" : false \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          11 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 21, \n        \"estimatedNrItems\" : 10, \n        \"expression\" : { \n          \"type\" : \"attribute access\", \n          \"name\" : \"id\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 1, \n          \"name\" : \"a\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"attribute\" \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          3 \n   
     ], \n        \"id\" : 4, \n        \"estimatedCost\" : 31, \n        \"estimatedNrItems\" : 10, \n        \"expression\" : { \n          \"type\" : \"compare ==\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"a\", \n              \"id\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 4 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"simple\" \n      }, \n      { \n        \"type\" : \"FilterNode\", \n        \"dependencies\" : [ \n          4 \n        ], \n        \"id\" : 5, \n        \"estimatedCost\" : 41, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        } \n      }, \n      { \n        \"type\" : \"LimitNode\", \n        \"dependencies\" : [ \n          5 \n        ], \n        \"id\" : 9, \n        \"estimatedCost\" : 42, \n        \"estimatedNrItems\" : 1, \n        \"offset\" : 0, \n        \"limit\" : 1, \n        \"fullCount\" : false \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          9 \n        ], \n        \"id\" : 6, \n        \"estimatedCost\" : 43, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"attribute access\", \n          \"name\" : \"name\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"attribute\" \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ 
\n          6 \n        ], \n        \"id\" : 10, \n        \"estimatedCost\" : 44, \n        \"estimatedNrItems\" : 1, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"remove-redundant-calculations\", \n      \"move-calculations-up-2\", \n      \"use-index-for-sort\", \n      \"remove-unnecessary-calculations-2\", \n      \"move-calculations-down\" \n    ], \n    \"collections\" : [ \n      { \n        \"name\" : \"products\", \n        \"type\" : \"read\" \n      } \n    ], \n    \"variables\" : [ \n      { \n        \"id\" : 6, \n        \"name\" : \"5\" \n      }, \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"name\" \n      }, \n      { \n        \"id\" : 1, \n        \"name\" : \"a\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"p\" \n      } \n    ], \n    \"estimatedCost\" : 44, \n    \"estimatedNrItems\" : 1 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 35, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using some options

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name\", \n  \"options\" : { \n    \"maxNumberOfPlans\" : 2, \n    \"allPlans\" : true, \n    \"optimizer\" : { \n      \"rules\" : [ \n        \"-all\", \n        \"+use-index-for-sort\", \n        \"+use-index-range\" \n      ] \n    } \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plans\" : [ \n    { \n      \"nodes\" : [ \n        { \n          \"type\" : \"SingletonNode\", \n          \"dependencies\" : [ ], \n          \"id\" : 1, \n          \"estimatedCost\" : 1, \n          \"estimatedNrItems\" : 1 \n        }, \n        { \n          \"type\" : \"IndexRangeNode\", \n          \"dependencies\" : [ \n            1 \n          ], \n          \"id\" : 11, \n          \"estimatedCost\" : 11, \n          \"estimatedNrItems\" : 10, \n          \"database\" : \"_system\", \n          \"collection\" : \"products\", \n          \"outVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          }, \n          \"ranges\" : [ \n            [ ] \n          ], \n          \"index\" : { \n            \"type\" : \"skiplist\", \n            \"id\" : \"739563975\", \n            \"unique\" : false, \n            \"sparse\" : false, \n            \"fields\" : [ \n              \"id\" \n            ] \n          }, \n          \"reverse\" : false \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            11 \n          ], \n          \"id\" : 3, \n          \"estimatedCost\" : 21, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"id\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n       
         \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 1, \n            \"name\" : \"a\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            3 \n          ], \n          \"id\" : 4, \n          \"estimatedCost\" : 31, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"compare ==\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"a\", \n                \"id\" : 1 \n              }, \n              { \n                \"type\" : \"value\", \n                \"value\" : 4 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 4, \n            \"name\" : \"3\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"simple\" \n        }, \n        { \n          \"type\" : \"FilterNode\", \n          \"dependencies\" : [ \n            4 \n          ], \n          \"id\" : 5, \n          \"estimatedCost\" : 41, \n          \"estimatedNrItems\" : 10, \n          \"inVariable\" : { \n            \"id\" : 4, \n            \"name\" : \"3\" \n          } \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            5 \n          ], \n          \"id\" : 6, \n          \"estimatedCost\" : 51, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"name\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n                \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 2, \n   
         \"name\" : \"name\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            6 \n          ], \n          \"id\" : 7, \n          \"estimatedCost\" : 61, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"id\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n                \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 6, \n            \"name\" : \"5\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"LimitNode\", \n          \"dependencies\" : [ \n            7 \n          ], \n          \"id\" : 9, \n          \"estimatedCost\" : 62, \n          \"estimatedNrItems\" : 1, \n          \"offset\" : 0, \n          \"limit\" : 1, \n          \"fullCount\" : false \n        }, \n        { \n          \"type\" : \"ReturnNode\", \n          \"dependencies\" : [ \n            9 \n          ], \n          \"id\" : 10, \n          \"estimatedCost\" : 63, \n          \"estimatedNrItems\" : 1, \n          \"inVariable\" : { \n            \"id\" : 2, \n            \"name\" : \"name\" \n          } \n        } \n      ], \n      \"rules\" : [ \n        \"use-index-for-sort\" \n      ], \n      \"collections\" : [ \n        { \n          \"name\" : \"products\", \n          \"type\" : \"read\" \n        } \n      ], \n      \"variables\" : [ \n        { \n          \"id\" : 6, \n          \"name\" : \"5\" \n        }, \n        { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        }, \n        
{ \n          \"id\" : 1, \n          \"name\" : \"a\" \n        }, \n        { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      ], \n      \"estimatedCost\" : 63, \n      \"estimatedNrItems\" : 1 \n    } \n  ], \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 4, \n    \"rulesSkipped\" : 31, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Returning all plans

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products FILTER p.id == 25 RETURN p\", \n  \"options\" : { \n    \"allPlans\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plans\" : [ \n    { \n      \"nodes\" : [ \n        { \n          \"type\" : \"SingletonNode\", \n          \"dependencies\" : [ ], \n          \"id\" : 1, \n          \"estimatedCost\" : 1, \n          \"estimatedNrItems\" : 1 \n        }, \n        { \n          \"type\" : \"IndexRangeNode\", \n          \"dependencies\" : [ \n            1 \n          ], \n          \"id\" : 6, \n          \"estimatedCost\" : 1.9899995050000001, \n          \"estimatedNrItems\" : 1, \n          \"database\" : \"_system\", \n          \"collection\" : \"products\", \n          \"outVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          }, \n          \"ranges\" : [ \n            [ \n              { \n                \"variable\" : \"p\", \n                \"attr\" : \"id\", \n                \"lowConst\" : { \n                  \"bound\" : 25, \n                  \"include\" : true, \n                  \"isConstant\" : true \n                }, \n                \"highConst\" : { \n                  \"bound\" : 25, \n                  \"include\" : true, \n                  \"isConstant\" : true \n                }, \n                \"lows\" : [ ], \n                \"highs\" : [ ], \n                \"valid\" : true, \n                \"equality\" : true \n              } \n            ] \n          ], \n          \"index\" : { \n            \"type\" : \"hash\", \n            \"id\" : \"736025031\", \n            \"unique\" : false, \n            \"sparse\" : false, \n            \"selectivityEstimate\" : 1, \n            \"fields\" : [ \n              \"id\" \n            ] \n          }, \n          \"reverse\" : false \n        }, \n        { \n          
\"type\" : \"ReturnNode\", \n          \"dependencies\" : [ \n            6 \n          ], \n          \"id\" : 5, \n          \"estimatedCost\" : 2.989999505, \n          \"estimatedNrItems\" : 1, \n          \"inVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          } \n        } \n      ], \n      \"rules\" : [ \n        \"use-index-range\", \n        \"remove-filter-covered-by-index\" \n      ], \n      \"collections\" : [ \n        { \n          \"name\" : \"products\", \n          \"type\" : \"read\" \n        } \n      ], \n      \"variables\" : [ \n        { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      ], \n      \"estimatedCost\" : 2.989999505, \n      \"estimatedNrItems\" : 1 \n    } \n  ], \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: A query that produces a warning

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 RETURN 1 / 0\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 2, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"range\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 10 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"simple\" \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 4, \n        \"estimatedCost\" : 3, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"value\", \n          \"value\" : null \n        }, \n        \"outVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"json\" \n      }, \n      { \n        \"type\" : \"EnumerateListNode\", \n        \"dependencies\" : [ \n          4 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 13, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"outVariable\" : { \n          \"id\" : 0, \n          
\"name\" : \"i\" \n        } \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          3 \n        ], \n        \"id\" : 5, \n        \"estimatedCost\" : 23, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"move-calculations-up-2\" \n    ], \n    \"collections\" : [ ], \n    \"variables\" : [ \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"1\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"i\" \n      } \n    ], \n    \"estimatedCost\" : 23, \n    \"estimatedNrItems\" : 10 \n  }, \n  \"warnings\" : [ \n    { \n      \"code\" : 1562, \n      \"message\" : \"division by zero\" \n    } \n  ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Invalid query (missing bind parameter)

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products FILTER p.id == @id LIMIT 2 RETURN p.n\" \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1551, \n  \"errorMessage\" : \"no value specified for declared bind parameter 'id' (while parsing)\" \n}\n

\n
Example: The data returned in the plan attribute of the result contains one element per AQL top-level statement (i.e. FOR, RETURN, FILTER etc.). If the query optimizer removed some unnecessary statements, the result might also contain fewer elements than there were top-level statements in the AQL query. The following example shows a query with a non-sensible filter condition that the optimizer has removed so that there are fewer top-level statements.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \"query\" : \"FOR i IN [ 1, 2, 3 ] FILTER 1 == 2 RETURN i\" }\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 2, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"array\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 2 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 3 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"json\" \n      }, \n      { \n        \"type\" : \"NoResultsNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 7, \n        \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0 \n      }, \n      { \n        \"type\" : \"EnumerateListNode\", \n        \"dependencies\" : [ \n          7 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"i\" \n        } \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          3 \n        ], \n        \"id\" : 6, \n   
     \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0, \n        \"inVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"i\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"move-filters-up\", \n      \"remove-unnecessary-filters\", \n      \"remove-unnecessary-calculations\" \n    ], \n    \"collections\" : [ ], \n    \"variables\" : [ \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"1\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"i\" \n      } \n    ], \n    \"estimatedCost\" : 0.5, \n    \"estimatedNrItems\" : 0 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_explain" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the query is valid, the server will respond with HTTP 200 and return the optimal execution plan in the plan attribute of the response. If option allPlans was set in the request, an array of plans will be returned in the allPlans attribute instead.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request, or if the query contains a parse error. The body of the response will contain the error details embedded in a JSON object. Omitting bind variables if the query references any will also result in an HTTP 400 error.
" + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + } + }, + "summary": " Explain an AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/export": { + "post": { + "description": "**A json post document with these Properties is required:**
  • count: boolean flag that indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result (optional). Calculating the \"count\" attribute might in the future have a performance impact so this option is turned off by default, and \"count\" is only returned when requested.
  • restrict: an object containing an array of attribute names that will be included or excluded when returning result documents.
    Not specifying restrict will by default return all attributes of each document.
    • fields: Contains an array of attribute names to include or exclude. Matching of attribute names for inclusion or exclusion will be done on the top level only. Specifying names of nested attributes is not supported at the moment.
      of type string
    • type: has to be set to either include or exclude depending on which you want to use
  • batchSize: maximum number of result documents to be transferred from the server to the client in one roundtrip (optional). If this attribute is not set, a server-controlled default value will be used.
  • flush: if set to true, a WAL flush operation will be executed prior to the export. The flush operation will start copying documents from the WAL to the collection's datafiles. There will be an additional wait time of up to flushWait seconds after the flush to allow the WAL collector to change the adjusted document meta-data to point into the datafiles, too. The default value is false (i.e. no flush) so most recently inserted or updated documents from the collection might be missing in the export.
  • flushWait: maximum wait time in seconds after a flush operation. The default value is 10. This option only has an effect when flush is set to true.
  • limit: an optional limit value, determining the maximum number of documents to be included in the cursor. Omitting the limit attribute or setting it to 0 will lead to no limit being used. If a limit is used, it is undefined which documents from the collection will be included in the export and which will be excluded. This is because there is no natural order of documents in a collection.
  • ttl: an optional time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
\n\nA call to this method creates a cursor containing all documents in the specified collection. In contrast to other data-producing APIs, the internal data structures produced by the export API are more lightweight, so it is the preferred way to retrieve all documents from a collection.
Documents are returned in a similar manner as in the `/_api/cursor` REST API. If all documents of the collection fit into the first batch, then no cursor will be created, and the result object's hasMore attribute will be set to false. If not all documents fit into the first batch, then the result object's hasMore attribute will be set to true, and the id attribute of the result will contain a cursor id.
The order in which the documents are returned is not specified.
By default, only those documents from the collection will be returned that are stored in the collection's datafiles. Documents that are present in the write-ahead log (WAL) at the time the export is run will not be exported.
To export these documents as well, the caller can issue a WAL flush request before calling the export API or set the flush attribute. Setting the flush option will trigger a WAL flush before the export so documents get copied from the WAL to the collection datafiles.
If the result set can be created by the server, the server will respond with HTTP 201. The body of the response will contain a JSON object with the result set.
The returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
  • result: an array of result documents (might be empty if the collection was empty)
  • hasMore: a boolean indicator whether there are more results available for the cursor on the server
  • count: the total number of result documents available (only available if the query was executed with the count attribute set)
  • id: id of temporary cursor created on the server (optional, see above)
If the JSON representation is malformed or the query specification is missing from the request, the server will respond with HTTP 400.
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message
Clients should always delete an export cursor result as early as possible because a lingering export cursor will prevent the underlying collection from being compacted or unloaded. By default, unused cursors will be deleted automatically after a server-defined idle time, and clients can adjust this idle time by setting the ttl value.
Note: this API is currently not supported on cluster coordinators.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_export" + }, + "x-description-offset": 59 + }, + { + "description": "The name of the collection to export.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the result set can be created by the server.
" + }, + "400": { + "description": "is returned if the JSON representation is malformed or the query specification is missing from the request.
" + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.

" + } + }, + "summary": " Create export cursor", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/gharial": { + "get": { + "description": "\n\nLists all graph names stored in this database.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graphs\" : [ \n    { \n      \"_id\" : \"_graphs/social\", \n      \"_key\" : \"social\", \n      \"_rev\" : \"557308359\", \n      \"edgeDefinitions\" : [ \n        { \n          \"collection\" : \"relation\", \n          \"from\" : [ \n            \"female\", \n            \"male\" \n          ], \n          \"to\" : [ \n            \"female\", \n            \"male\" \n          ] \n        } \n      ], \n      \"orphanCollections\" : [ ] \n    }, \n    { \n      \"_id\" : \"_graphs/routeplanner\", \n      \"_key\" : \"routeplanner\", \n      \"_rev\" : \"560650695\", \n      \"orphanCollections\" : [ ], \n      \"edgeDefinitions\" : [ \n        { \n          \"collection\" : \"germanHighway\", \n          \"from\" : [ \n            \"germanCity\" \n          ], \n          \"to\" : [ \n            \"germanCity\" \n          ] \n        }, \n        { \n          \"collection\" : \"frenchHighway\", \n          \"from\" : [ \n            \"frenchCity\" \n          ], \n          \"to\" : [ \n            \"frenchCity\" \n          ] \n        }, \n        { \n          \"collection\" : \"internationalHighway\", \n          \"from\" : [ \n            \"frenchCity\", \n            \"germanCity\" \n          ], \n          \"to\" : [ \n            \"frenchCity\", \n            \"germanCity\" \n          ] \n        } \n      ] \n    } \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the module is available and the graphs could be listed.
" + } + }, + "summary": " List all graphs", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nThe creation of a graph requires the name of the graph and a definition of its edges. [See also edge definitions](../GeneralGraphs/Management.md#edge-definitions).
**A json post document with these Properties is required:**
  • orphanCollections: An array of additional vertex collections.
  • edgeDefinitions: An array of definitions for the edge
  • name: Name of the graph.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF\n{ \n  \"name\" : \"myGraph\", \n  \"edgeDefinitions\" : [ \n    { \n      \"collection\" : \"edges\", \n      \"from\" : [ \n        \"startVertices\" \n      ], \n      \"to\" : [ \n        \"endVertices\" \n      ] \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 527817159\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"527817159\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_create_http_examples" + }, + "x-description-offset": 229 + } + ], + "responses": { + "201": { + "description": "Is returned if the graph could be created. The body contains the graph configuration that has been stored.
" + }, + "409": { + "description": "Returned if there is a conflict storing the graph. This can occur either if a graph with this name is already stored, or if there is one edge definition with the same [edge collection](../Glossary/index.html#edge_collection) but a different signature used in any other graph.
" + } + }, + "summary": " Create a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}": { + "delete": { + "description": "\n\nRemoves a graph from the collection \\_graphs.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the graph could be dropped.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Drop a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets a graph from the collection \\_graphs. Returns the definition content of this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/myGraph\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 552131015\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"552131015\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the graph could be found.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Get a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge": { + "get": { + "description": "\n\nLists all edge collections within this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/edge\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"collections\" : [ \n    \"relation\" \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the edge definitions could be listed.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " List edge definitions", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds an additional edge definition to the graph. This edge definition has to contain a collection and an array of each from and to vertex collections. An edge definition can only be added if this definition is either not used in any other graph, or it is used with exactly the same definition. It is not possible to store a definition \"e\" from \"v1\" to \"v2\" in the one graph, and \"e\" from \"v2\" to \"v1\" in the other graph.
**A json post document with these Properties is required:**
  • to: One or many edge collections that can contain target vertices. of type string
  • from: One or many vertex collections that can contain source vertices. of type string
  • collection: The name of the edge collection to be used.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge <<EOF\n{ \n  \"collection\" : \"lives_in\", \n  \"from\" : [ \n    \"female\", \n    \"male\" \n  ], \n  \"to\" : [ \n    \"city\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 514972103\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      }, \n      { \n        \"collection\" : \"lives_in\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"city\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"514972103\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_edge_definition_add_http_examples" + }, + "x-description-offset": 537 + } + ], + "responses": { + "200": { + "description": "Returned if the definition could be added successfully.
" + }, + "400": { + "description": "Returned if the definition could not be added, the edge collection is used in another graph with a different signature.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Add edge definition", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{collection-name}": { + "post": { + "description": "\n\nCreates a new edge in the collection. Within the body the edge has to contain a \_from and \_to value referencing valid vertices in the graph. Furthermore the edge has to be valid in the definition of this [edge collection](../Glossary/index.html#edge_collection).
free style json body
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF\n{ \n  \"type\" : \"friend\", \n  \"_from\" : \"female/alice\", \n  \"_to\" : \"female/diana\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 513464775\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/513464775\", \n    \"_rev\" : \"513464775\", \n    \"_key\" : \"513464775\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 303 + } + ], + "responses": { + "201": { + "description": "Returned if the edge could be created.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + } + }, + "summary": " Create an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{collection-name}/{edge-key}": { + "delete": { + "description": "\n\nRemoves an edge from the collection.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge could be removed.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Remove an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets an edge from the given collection.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 549837255\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_key\" : \"aliceAndBob\", \n    \"_rev\" : \"549837255\", \n    \"_from\" : \"female/alice\", \n    \"_to\" : \"male/bob\", \n    \"type\" : \"married\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge could be found.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Get an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "patch": { + "description": "\n\nUpdates the data of the specific edge in the collection.
free style json body
Example:

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob <<EOF\n{ \n  \"since\" : \"01.01.2001\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 580639175\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_rev\" : \"580639175\", \n    \"_oldRev\" : \"579525063\", \n    \"_key\" : \"aliceAndBob\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be a JSON object containing the attributes to be updated.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 63 + } + ], + "responses": { + "200": { + "description": "Returned if the edge could be updated.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + } + }, + "summary": " Modify an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "put": { + "description": "\n\nReplaces the data of an edge in the collection.
free style json body
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob <<EOF\n{ \n  \"type\" : \"divorced\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 584505799\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_rev\" : \"584505799\", \n    \"_oldRev\" : \"583522759\", \n    \"_key\" : \"aliceAndBob\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 54 + } + ], + "responses": { + "200": { + "description": "Returned if the edge could be replaced.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Replace an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{definition-name}": { + "delete": { + "description": "\n\nRemove one edge definition from the graph. This will only remove the edge collection, the vertex collections remain untouched and can still be used in your queries.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/edge/relation\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 544659911\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ ], \n    \"orphanCollections\" : [ \n      \"female\", \n      \"male\" \n    ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"544659911\" \n  } \n}\n

\n

", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge definition could be removed from the graph.
" + }, + "400": { + "description": "Returned if no edge definition with this name is found in the graph.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Remove an edge definition from the graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nChange one specific edge definition. This will modify all occurrences of this definition in all graphs known to your database.
**A json post document with these Properties is required:**
  • to: One or many edge collections that can contain target vertices. of type string
  • from: One or many vertex collections that can contain source vertices. of type string
  • collection: The name of the edge collection to be used.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF\n{ \n  \"collection\" : \"relation\", \n  \"from\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ], \n  \"to\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ] \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 593746375\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"593746375\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_edge_definition_modify_http_examples" + }, + "x-description-offset": 192 + } + ], + "responses": { + "200": { + "description": "Returned if the edge definition could be replaced.
" + }, + "400": { + "description": "Returned if no edge definition with this name is found in the graph.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Replace an edge definition", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex": { + "get": { + "description": "\n\nLists all vertex collections within this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"collections\" : [ \n    \"female\", \n    \"male\" \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the collections could be listed.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " List vertex collections", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds a vertex collection to the set of collections of the graph. If the collection does not exist, it will be created.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex <<EOF\n{ \n  \"collection\" : \"otherVertices\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 523426247\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ \n      \"otherVertices\" \n    ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"523426247\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "201": { + "description": "Returned if the edge collection could be added successfully.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Add vertex collection", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex/{collection-name}": { + "delete": { + "description": "\n\nRemoves a vertex collection from the graph and optionally deletes the collection, if it is not used in any other graph.

Example: You can remove vertex collections that are not used in any edge collection:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/otherVertices\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 588372423\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"588372423\" \n  } \n}\n

\n
Example: You cannot remove vertex collections that are used in edge collections:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/male\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1928, \n  \"errorMessage\" : \"not in orphan collection\" \n}\n

\n

", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex collection was removed from the graph successfully.
" + }, + "400": { + "description": "Returned if the vertex collection is still used in an edge definition. In this case it cannot be removed from the graph yet, it has to be removed from the edge definition first.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Remove vertex collection", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds a vertex to the given collection.
free style json body
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/male <<EOF\n{ \n  \"name\" : \"Francis\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 521918919\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"male/521918919\", \n    \"_rev\" : \"521918919\", \n    \"_key\" : \"521918919\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 45 + } + ], + "responses": { + "201": { + "description": "Returned if the vertex could be added and waitForSync is true.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph or no vertex collection with this name could be found.
" + } + }, + "summary": " Create a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex/{collection-name}/{vertex-key}": { + "delete": { + "description": "\n\nRemoves a vertex from the collection.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex could be removed.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Remove a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets a vertex from the given collection.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 553966023\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_key\" : \"alice\", \n    \"_rev\" : \"553966023\", \n    \"name\" : \"Alice\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex could be found.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Get a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "patch": { + "description": "\n\nUpdates the data of the specific vertex in the collection.
free style json body
Example:

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 576641479\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_rev\" : \"576641479\", \n    \"_oldRev\" : \"574478791\", \n    \"_key\" : \"alice\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to contain a JSON object containing exactly the attributes that should be replaced.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 65 + } + ], + "responses": { + "200": { + "description": "Returned if the vertex could be updated.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Modify a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "put": { + "description": "\n\nReplaces the data of a vertex in the collection.
free style json body
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"name\" : \"Alice Cooper\", \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 600496583\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_rev\" : \"600496583\", \n    \"_oldRev\" : \"598333895\", \n    \"_key\" : \"alice\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 55 + } + ], + "responses": { + "200": { + "description": "Returned if the vertex could be replaced.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Replace a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/import#document": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates documents in the collection identified by `collection-name`. The first line of the request body must contain a JSON-encoded array of attribute names. All following lines in the request body must contain JSON-encoded arrays of attribute values. Each line is interpreted as a separate document, and the values specified will be mapped to the array of attribute names specified in the first header line.
The response is a JSON object with the following attributes:
  • `created`: number of documents imported.
  • `errors`: number of documents that were not imported due to an error.
  • `empty`: number of empty lines found in the input (will only contain a value greater zero for types `documents` or `auto`).
  • `updated`: number of updated/replaced documents (in case `onDuplicate` was set to either `update` or `replace`).
  • `ignored`: number of failed but ignored insert operations (in case `onDuplicate` was set to `ignore`).
  • `details`: if URL parameter `details` is set to true, the result will contain a `details` attribute which is an array with more detailed information about which documents could not be inserted.
Note: this API is currently not supported on cluster coordinators.

Example: Importing two documents, with attributes `_key`, `value1` and `value2` each. One line in the import data is empty

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n\n[ \"foo\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 1, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing two documents into a new collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&createCollection=true <<EOF\n[ \"value1\", \"value2\" ]\n[ 1234, null ]\n[ \"foo\", \"bar\" ]\n[ 534.55, true ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, with attributes `_from`, `_to` and `name`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links <<EOF\n[ \"_from\", \"_to\", \"name\" ]\n[ \"products/123\", \"products/234\", \"some name\" ]\n[ \"products/332\", \"products/abc\", \"other name\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, omitting `_from` or `_to`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&details=true <<EOF\n[ \"name\" ]\n[ \"some name\" ]\n[ \"other name\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 0, \n  \"errors\" : 2, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 1: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"some name\\\"}\", \n    \"at position 2: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"other name\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, but allow partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&details=true <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"abc\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 1, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 2: creating document failed with error 'unique constraint violated', offending document: {\\\"_key\\\":\\\"abc\\\",\\\"value1\\\":\\\"bar\\\",\\\"value2\\\":\\\"baz\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, not allowing partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&complete=true <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"abc\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 409 Conflict\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cannot create document, unique constraint violated\", \n  \"code\" : 409, \n  \"errorNum\" : 1210 \n}\n

\n
Example: Using a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"foo\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Using a malformed body

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n{ \"_key\": \"foo\", \"value1\": \"bar\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"no JSON array found in second line\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
", + "parameters": [ + { + "description": "The body must consist of JSON-encoded arrays of attribute values, with one line per document. The first row of the request must be a JSON-encoded array of attribute names. These attribute names are used for the data in the subsequent lines.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then all data in the collection will be removed prior to the import. Note that any existing index definitions will be preseved.
", + "in": "query", + "name": "overwrite", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until documents have been synced to disk before returning.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Controls what action is carried out in case of a unique key constraint violation. Possible values are:
  • error: this will not import the current document because of the unique key constraint violation. This is the default setting.
  • update: this will update an existing document in the database with the data specified in the request. Attributes of the existing document that are not present in the request will be preserved.
  • replace: this will replace an existing document in the database with the data specified in the request.
  • ignore: this will not update an existing document and simply ignore the error caused by the unique key constraint violation.
Note that update, replace and ignore will only work when the import document in the request contains the _key attribute. update and replace may also fail because of secondary unique key constraint violations.
", + "in": "query", + "name": "onDuplicate", + "required": false, + "type": "string" + }, + { + "description": "If set to `true` or `yes`, it will make the whole import fail if any error occurs. Otherwise the import will continue even if some documents cannot be imported.
", + "in": "query", + "name": "complete", + "required": false, + "type": "boolean" + }, + { + "description": "If set to `true` or `yes`, the result will include an attribute `details` with details about documents that could not be imported.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if all documents could be imported successfully.
" + }, + "400": { + "description": "is returned if `type` contains an invalid value, no `collection` is specified, the documents are incorrectly encoded, or the request is malformed.
" + }, + "404": { + "description": "is returned if `collection` or the `_from` or `_to` attributes of an imported edge refer to an unknown collection.
" + }, + "409": { + "description": "is returned if the import would trigger a unique key violation and `complete` is set to `true`.
" + }, + "500": { + "description": "is returned if the server cannot auto-generate a document key (out of keys error) for a document with no user-defined key.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.
" + } + }, + "summary": "imports document values", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/import#json": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates documents in the collection identified by `collection-name`. The JSON representations of the documents must be passed as the body of the POST request. The request body can either consist of multiple lines, with each line being a single stand-alone JSON object, or a single JSON array with sub-objects.
The response is a JSON object with the following attributes:
  • `created`: number of documents imported.
  • `errors`: number of documents that were not imported due to an error.
  • `empty`: number of empty lines found in the input (will only contain a value greater zero for types `documents` or `auto`).
  • `updated`: number of updated/replaced documents (in case `onDuplicate` was set to either `update` or `replace`).
  • `ignored`: number of failed but ignored insert operations (in case `onDuplicate` was set to `ignore`).
  • `details`: if URL parameter `details` is set to true, the result will contain a `details` attribute which is an array with more detailed information about which documents could not be inserted.
Note: this API is currently not supported on cluster coordinators.

Example: Importing documents with heterogeneous attributes from a JSON array

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF\n[ \n  { \n    \"_key\" : \"abc\", \n    \"value1\" : 25, \n    \"value2\" : \"test\", \n    \"allowed\" : true \n  }, \n  { \n    \"_key\" : \"foo\", \n    \"name\" : \"baz\" \n  }, \n  { \n    \"name\" : { \n      \"detailed\" : \"detailed name\", \n      \"short\" : \"short name\" \n    } \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing documents from individual JSON lines

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\", \"allowed\": true }\n{ \"_key\": \"foo\", \"name\": \"baz\" }\n\n{ \"name\": { \"detailed\": \"detailed name\", \"short\": \"short name\" } }\n\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 1, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Using the auto type detection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=auto <<EOF\n[ \n  { \n    \"_key\" : \"abc\", \n    \"value1\" : 25, \n    \"value2\" : \"test\", \n    \"allowed\" : true \n  }, \n  { \n    \"_key\" : \"foo\", \n    \"name\" : \"baz\" \n  }, \n  { \n    \"name\" : { \n      \"detailed\" : \"detailed name\", \n      \"short\" : \"short name\" \n    } \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing documents into a new collection from a JSON array

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&createCollection=true&type=list <<EOF\n[ \n  { \n    \"id\" : \"12553\", \n    \"active\" : true \n  }, \n  { \n    \"id\" : \"4433\", \n    \"active\" : false \n  }, \n  { \n    \"id\" : \"55932\", \n    \"count\" : 4334 \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, with attributes `_from`, `_to` and `name`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=documents <<EOF\n{ \"_from\": \"products/123\", \"_to\": \"products/234\" }\n{ \"_from\": \"products/332\", \"_to\": \"products/abc\", \"name\": \"other name\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, omitting `_from` or `_to`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=list&details=true <<EOF\n[ \n  { \n    \"name\" : \"some name\" \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 0, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 1: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"some name\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, but allow partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&details=true <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\" }\n{ \"_key\": \"abc\", \"value1\": \"bar\", \"value2\": \"baz\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 1, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 2: creating document failed with error 'unique constraint violated', offending document: {\\\"_key\\\":\\\"abc\\\",\\\"value1\\\":\\\"bar\\\",\\\"value2\\\":\\\"baz\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, not allowing partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&complete=true <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\" }\n{ \"_key\": \"abc\", \"value1\": \"bar\", \"value2\": \"baz\" }\nEOF\n\nHTTP/1.1 409 Conflict\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cannot create document, unique constraint violated\", \n  \"code\" : 409, \n  \"errorNum\" : 1210 \n}\n

\n
Example: Using a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF\n{ \"name\": \"test\" }\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Using a malformed body

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF\n{ }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting a JSON array in the request\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
", + "parameters": [ + { + "description": "The body must either be a JSON-encoded array of objects or a string with multiple JSON objects separated by newlines.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "Determines how the body of the request will be interpreted. `type` can have the following values:
  • `documents`: when this type is used, each line in the request body is expected to be an individual JSON-encoded document. Multiple JSON objects in the request body need to be separated by newlines.
  • `list`: when this type is used, the request body must contain a single JSON-encoded array of individual objects to import.
  • `auto`: if set, this will automatically determine the body type (either `documents` or `list`).
", + "in": "query", + "name": "type", + "required": true, + "type": "string" + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then all data in the collection will be removed prior to the import. Note that any existing index definitions will be preseved.
", + "in": "query", + "name": "overwrite", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until documents have been synced to disk before returning.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Controls what action is carried out in case of a unique key constraint violation. Possible values are:
  • error: this will not import the current document because of the unique key constraint violation. This is the default setting.
  • update: this will update an existing document in the database with the data specified in the request. Attributes of the existing document that are not present in the request will be preserved.
  • replace: this will replace an existing document in the database with the data specified in the request.
  • ignore: this will not update an existing document and simply ignore the error caused by a unique key constraint violation.
Note that update, replace and ignore will only work when the import document in the request contains the _key attribute. update and replace may also fail because of secondary unique key constraint violations.
", + "in": "query", + "name": "onDuplicate", + "required": false, + "type": "string" + }, + { + "description": "If set to `true` or `yes`, it will make the whole import fail if any error occurs. Otherwise the import will continue even if some documents cannot be imported.
", + "in": "query", + "name": "complete", + "required": false, + "type": "boolean" + }, + { + "description": "If set to `true` or `yes`, the result will include an attribute `details` with details about documents that could not be imported.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if all documents could be imported successfully.
" + }, + "400": { + "description": "is returned if `type` contains an invalid value, no `collection` is specified, the documents are incorrectly encoded, or the request is malformed.
" + }, + "404": { + "description": "is returned if `collection` or the `_from` or `_to` attributes of an imported edge refer to an unknown collection.
" + }, + "409": { + "description": "is returned if the import would trigger a unique key violation and `complete` is set to `true`.
" + }, + "500": { + "description": "is returned if the server cannot auto-generate a document key (out of keys error) for a document with no user-defined key.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.
" + } + }, + "summary": "imports documents from JSON", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/index": { + "get": { + "description": "\n\n
Returns an object with an attribute indexes containing an array of all index descriptions for the given collection. The same information is also available in the identifiers as an object with the index handles as keys.

Example: Return information about all indexes

shell> curl --dump - http://localhost:8529/_api/index?collection=products\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"indexes\" : [ \n    { \n      \"id\" : \"products/0\", \n      \"type\" : \"primary\", \n      \"fields\" : [ \n        \"_key\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : true, \n      \"sparse\" : false \n    }, \n    { \n      \"id\" : \"products/758438343\", \n      \"type\" : \"hash\", \n      \"fields\" : [ \n        \"name\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : false, \n      \"sparse\" : false \n    }, \n    { \n      \"id\" : \"products/758700487\", \n      \"type\" : \"skiplist\", \n      \"fields\" : [ \n        \"price\" \n      ], \n      \"unique\" : false, \n      \"sparse\" : true \n    } \n  ], \n  \"identifiers\" : { \n    \"products/0\" : { \n      \"id\" : \"products/0\", \n      \"type\" : \"primary\", \n      \"fields\" : [ \n        \"_key\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : true, \n      \"sparse\" : false \n    }, \n    \"products/758438343\" : { \n      \"id\" : \"products/758438343\", \n      \"type\" : \"hash\", \n      \"fields\" : [ \n        \"name\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : false, \n      \"sparse\" : false \n    }, \n    \"products/758700487\" : { \n      \"id\" : \"products/758700487\", \n      \"type\" : \"skiplist\", \n      \"fields\" : [ \n        \"price\" \n      ], \n      \"unique\" : false, \n      \"sparse\" : true \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "returns a json object containing a list of indexes on that collection.
" + } + }, + "summary": " Read all indexes of a collection", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute paths. of type string
  • unique: if true, then create a unique index.
  • type: must be equal to \"skiplist\".
  • sparse: if true, then create a sparse index.
\n\n
Creates a skip-list index for the collection collection-name, if it does not already exist. The call expects an object containing the index details.
In a sparse index all documents will be excluded from the index that do not contain at least one of the specified index attributes (i.e. fields) or that have a value of null in any of the specified index attributes. Such documents will not be indexed, and not be taken into account for uniqueness checks if the unique flag is set.
In a non-sparse index, these documents will be indexed (for non-present indexed attributes, a value of null will be used) and will be taken into account for uniqueness checks if the unique flag is set.
Note: unique indexes on non-shard keys are not supported in a cluster.

Example: Creating a skiplist index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"skiplist\", \n  \"unique\" : false, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/761715143\", \n  \"type\" : \"skiplist\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a sparse skiplist index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"skiplist\", \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"fields\" : [ \n    \"a\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/763222471\", \n  \"type\" : \"skiplist\", \n  \"fields\" : [ \n    \"a\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.

", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_skiplist" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "400": { + "description": "If the collection already contains documents and you try to create a unique skip-list index in such a way that there are documents violating the uniqueness, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create skip list", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#CapConstraints": { + "post": { + "description": "**A json post document with these Properties is required:**
  • byteSize: The maximal size of the active document data in the collection (in bytes). If specified, the value must be at least 16384.

  • type: must be equal to \"cap\".
  • size: The maximal number of documents for the collection. If specified, the value must be greater than zero.
\n\nNOTE Swagger examples won't work due to the anchor.


Creates a cap constraint for the collection collection-name, if it does not already exist. Expects an object containing the index details.
Note: The cap constraint does not index particular attributes of the documents in a collection, but limits the number of documents in the collection to a maximum value. The cap constraint thus does not support attribute names specified in the fields attribute nor uniqueness of any kind via the unique attribute.
It is allowed to specify either size or byteSize, or both at the same time. If both are specified, then the automatic document removal will be triggered by the first non-met constraint.

Example: Creating a cap constraint

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"cap\", \n  \"size\" : 10 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/760142279\", \n  \"type\" : \"cap\", \n  \"size\" : 10, \n  \"byteSize\" : 0, \n  \"unique\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_cap" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then an HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then an HTTP 201 is returned.
" + }, + "400": { + "description": "If either size or byteSize contain invalid values, then an HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create cap constraint", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#fulltext": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute names. Currently, the array is limited to exactly one attribute. of type string
  • type: must be equal to \"fulltext\".
  • minLength: Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index.
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a fulltext index for the collection collection-name, if it does not already exist. The call expects an object containing the index details.

Example: Creating a fulltext index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"fulltext\", \n  \"fields\" : [ \n    \"text\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/760601031\", \n  \"type\" : \"fulltext\", \n  \"fields\" : [ \n    \"text\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"minLength\" : 2, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_fulltext" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create fulltext index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#general": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates a new index in the collection collection. Expects an object containing the index details.
The type of the index to be created must be specified in the type attribute of the index details. Depending on the index type, additional attributes may need to be specified in the request in order to create the index.
Most indexes (a notable exception being the cap constraint) require the array of attributes to be indexed in the fields attribute of the index details. Depending on the index type, a single attribute or multiple attributes can be indexed.
Indexing system attributes such as _id, _key, _from, and _to is not supported for user-defined indexes. Manually creating an index using any of these attributes will fail with an error.
Some indexes can be created as unique or non-unique variants. Uniqueness can be controlled for most indexes by specifying the unique flag in the index details. Setting it to true will create a unique index. Setting it to false or omitting the unique attribute will create a non-unique index.
Note: The following index types do not support uniqueness, and using the unique attribute with these types may lead to an error:
  • cap constraints
  • fulltext indexes
Note: Unique indexes on non-shard keys are not supported in a cluster.
Hash and skiplist indexes can optionally be created in a sparse variant. A sparse index will be created if the sparse attribute in the index details is set to true. Sparse indexes do not index documents for which any of the index attributes is either not set or is null.
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then an HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then an HTTP 201 is returned.
" + }, + "400": { + "description": "If an invalid index description is posted or attributes are used that the target index will not support, then an HTTP 400 is returned.
" + }, + "404": { + "description": "If collection is unknown, then an HTTP 404 is returned.
" + } + }, + "summary": " Create index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#geo": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: An array with one or two attribute paths.
    If it is an array with one attribute path location, then a geo-spatial index on all documents is created using location as path to the coordinates. The value of the attribute must be an array with at least two double values. The array must contain the latitude (first value) and the longitude (second value). All documents which do not have the attribute path, or whose values are not suitable, are ignored.
    If it is an array with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the attribute longitude must be a double. All documents which do not have the attribute paths, or whose values are not suitable, are ignored. of type string
  • type: must be equal to \"geo\".
  • geoJson: If a geo-spatial index on a location is constructed and geoJson is true, then the order within the array is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a geo-spatial index in the collection collection-name, if it does not already exist. Expects an object containing the index details.
Geo indexes are always sparse, meaning that documents that do not contain the index attributes or have non-numeric values in the index attributes will not be indexed.

Example: Creating a geo index with a location attribute

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"geo\", \n  \"fields\" : [ \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/759749063\", \n  \"type\" : \"geo1\", \n  \"fields\" : [ \n    \"b\" \n  ], \n  \"geoJson\" : false, \n  \"constraint\" : false, \n  \"unique\" : false, \n  \"ignoreNull\" : true, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a geo index with latitude and longitude attributes

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"geo\", \n  \"fields\" : [ \n    \"e\", \n    \"f\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/759290311\", \n  \"type\" : \"geo2\", \n  \"fields\" : [ \n    \"e\", \n    \"f\" \n  ], \n  \"constraint\" : false, \n  \"unique\" : false, \n  \"ignoreNull\" : true, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.

", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_geo" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create geo-spatial index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#hash": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute paths. of type string
  • unique: if true, then create a unique index.
  • type: must be equal to \"hash\".
  • sparse: if true, then create a sparse index.
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a hash index for the collection collection-name if it does not already exist. The call expects an object containing the index details.
In a sparse index all documents will be excluded from the index that do not contain at least one of the specified index attributes (i.e. fields) or that have a value of null in any of the specified index attributes. Such documents will not be indexed, and not be taken into account for uniqueness checks if the unique flag is set.
In a non-sparse index, these documents will be indexed (for non-present indexed attributes, a value of null will be used) and will be taken into account for uniqueness checks if the unique flag is set.
Note: unique indexes on non-shard keys are not supported in a cluster.

Example: Creating a unique constraint

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : true, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/762239431\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : true, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a non-unique hash index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : false, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/761190855\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : false, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a sparse index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"fields\" : [ \n    \"a\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/762698183\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_hash" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "400": { + "description": "If the collection already contains documents and you try to create a unique hash index in such a way that there are documents violating the uniqueness, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create hash index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index/{index-handle}": { + "delete": { + "description": "\n\n
Deletes an index with index-handle.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/index/products/763746759\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/763746759\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The index handle.
", + "format": "string", + "in": "path", + "name": "index-handle", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the index could be deleted, then an HTTP 200 is returned.
" + }, + "404": { + "description": "If the index-handle is unknown, then an HTTP 404 is returned." + } + }, + "summary": " Delete index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + }, + "get": { + "description": "\n\n
The result is an object describing the index. It has at least the following attributes:
  • id: the identifier of the index
  • type: the index type
All other attributes are type-dependent. For example, some indexes provide unique or sparse flags, whereas others don't. Some indexes also provide a selectivity estimate in the selectivityEstimate attribute of the result.

Example:

shell> curl --dump - http://localhost:8529/_api/index/products/0\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/0\", \n  \"type\" : \"primary\", \n  \"fields\" : [ \n    \"_key\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : true, \n  \"sparse\" : false, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The index-handle.
", + "format": "string", + "in": "path", + "name": "index-handle", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the index exists, then a HTTP 200 is returned.
" + }, + "404": { + "description": "If the index does not exist, then a HTTP 404 is returned.
" + } + }, + "summary": "Read index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/job/{job-id}": { + "get": { + "description": "\n\nReturns the processing status of the specified job. The processing status can be determined by peeking into the HTTP response code of the response.

Example: Querying the status of a done job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603314631\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/603314631\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 603314631\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Querying the status of a pending job: (we create a sleep job therefore...)

shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_admin/sleep?duration=30\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603380167\n\nshell> curl --dump - http://localhost:8529/_api/job/603380167\n\nHTTP/1.1 204 No Content\ncontent-type: text/plain; charset=utf-8\n\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the job requested via job-id has been executed and its result is ready to fetch.
" + }, + "204": { + "description": "is returned if the job requested via job-id is still in the queue of pending (or not yet finished) jobs.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list.
" + } + }, + "summary": " Returns async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + }, + "put": { + "description": "\n\nReturns the result of an async job identified by job-id. If the async job result is present on the server, the result will be removed from the list of result. That means this method can be called for each job-id once. The method will return the original job result's headers and body, plus the additional HTTP header x-arango-async-job-id. If this header is present, then the job was found and the response contains the original job's result. If the header is not present, the job was not found and the response contains status information from the job manager.

Example: Not providing a job-id:

shell> curl -X PUT --dump - http://localhost:8529/_api/job\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"bad parameter\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
Example: Providing a job-id for a non-existing job:

shell> curl -X PUT --dump - http://localhost:8529/_api/job/notthere\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"not found\", \n  \"code\" : 404, \n  \"errorNum\" : 404 \n}\n

\n
Example: Fetching the result of an HTTP GET job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602986951\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/602986951\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 602986951\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Fetching the result of an HTTP POST job that failed:

shell> curl -X PUT --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \" this name is invalid \" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603052487\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/603052487\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 603052487\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 400, \n  \"errorMessage\" : \"expected PUT /_api/collection/<collection-name>/<action>\" \n}\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the job requested via job-id is still in the queue of pending (or not yet finished) jobs. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "400": { + "description": "is returned if no job-id was specified in the request. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list. In this case, no x-arango-async-id HTTP header will be returned.
" + } + }, + "summary": " Return result of an async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/job/{job-id}/cancel": { + "put": { + "description": "\n\nCancels the currently running job identified by job-id. Note that it still might take some time to actually cancel the running async job.

Example:

shell> curl -X POST --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 FOR j IN 1..10 LET x = sleep(1.0) FILTER i == 5 && j == 5 RETURN 42\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602659271\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"602659271\" \n]\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/602659271/cancel\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "cancel has been initiated.
" + }, + "400": { + "description": "is returned if no job-id was specified in the request. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list. In this case, no x-arango-async-id HTTP header will be returned.
" + } + }, + "summary": " Cancel async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/job/{type}": { + "delete": { + "description": "\n\nDeletes either all job results, expired job results, or the result of a specific job. Clients can use this method to perform an eventual garbage collection of job results.

Example: Deleting all jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602790343\n\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/all\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
Example: Deleting expired jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602855879\n\nshell> curl --dump - http://localhost:8529/_admin/time\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"time\" : 1443627576.40017, \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/expired?stamp=1443627576.40017\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
Example: Deleting the result of a specific job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602921415\n\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/602921415\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
Example: Deleting the result of a non-existing job:

shell> curl -X DELETE --dump - http://localhost:8529/_api/job/AreYouThere\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"not found\", \n  \"code\" : 404, \n  \"errorNum\" : 404 \n}\n

\n
", + "parameters": [ + { + "description": "The type of jobs to delete. type can be: *all: Deletes all jobs results. Currently executing or queued async jobs will not be stopped by this call. *expired: Deletes expired results. To determine the expiration status of a result, pass the stamp URL parameter. stamp needs to be a UNIX timestamp, and all async job results created at a lower timestamp will be deleted. *an actual job-id: In this case, the call will remove the result of the specified async job. If the job is currently executing or queued, it will not be aborted.
", + "format": "string", + "in": "path", + "name": "type", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the deletion operation was carried out successfully. This code will also be returned if no results were deleted.
" + }, + "400": { + "description": "is returned if type is not specified or has an invalid value.
" + }, + "404": { + "description": "is returned if type is a job-id but no async job with the specified id was found.
" + } + }, + "summary": " Deletes async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + }, + "get": { + "description": "\n\nReturns the list of ids of async jobs with a specific status (either done or pending). The list can be used by the client to get an overview of the job system status and to retrieve completed job results later.

Example: Fetching the list of done jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603118023\n\nshell> curl --dump - http://localhost:8529/_api/job/done\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"603118023\" \n]\n

\n
Example: Fetching the list of pending jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603183559\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
Example: Querying the status of a pending job (therefore we first create a sleep job):

shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_admin/sleep?duration=30\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603249095\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"603249095\" \n]\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/603249095\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
", + "parameters": [ + { + "description": "The type of jobs to return. The type can be either done or pending. Setting the type to done will make the method return the ids of already completed async jobs for which results can be fetched. Setting the type to pending will return the ids of not yet finished async jobs.
", + "format": "string", + "in": "path", + "name": "type", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the list can be compiled successfully. Note: the list might be empty.
" + }, + "400": { + "description": "is returned if type is not specified or has an invalid value.
" + } + }, + "summary": " Returns list of async jobs", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/query": { + "post": { + "description": "**A json post document with these Properties is required:**
  • query: To validate a query string without executing it, the query string can be passed to the server via an HTTP POST request.

Example: a Valid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/query <<EOF\n{ \"query\" : \"FOR p IN products FILTER p.name == @name LIMIT 2 RETURN p.n\" }\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"parsed\" : true, \n  \"collections\" : [ \n    \"products\" \n  ], \n  \"bindVars\" : [ \n    \"name\" \n  ], \n  \"ast\" : [ \n    { \n      \"type\" : \"root\", \n      \"subNodes\" : [ \n        { \n          \"type\" : \"for\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"variable\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            }, \n            { \n              \"type\" : \"collection\", \n              \"name\" : \"products\" \n            } \n          ] \n        }, \n        { \n          \"type\" : \"filter\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"compare ==\", \n              \"subNodes\" : [ \n                { \n                  \"type\" : \"attribute access\", \n                  \"name\" : \"name\", \n                  \"subNodes\" : [ \n                    { \n                      \"type\" : \"reference\", \n                      \"name\" : \"p\", \n                      \"id\" : 0 \n                    } \n                  ] \n                }, \n                { \n                  \"type\" : \"parameter\", \n                  \"name\" : \"name\" \n                } \n              ] \n            } \n          ] \n        }, \n        { \n          \"type\" : \"limit\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 0 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 2 \n            } \n          ] \n        }, \n        { \n          \"type\" : \"return\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"attribute access\", \n 
             \"name\" : \"n\", \n              \"subNodes\" : [ \n                { \n                  \"type\" : \"reference\", \n                  \"name\" : \"p\", \n                  \"id\" : 0 \n                } \n              ] \n            } \n          ] \n        } \n      ] \n    } \n  ], \n  \"warnings\" : [ ] \n}\n

\n
Example: an Invalid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/query <<EOF\n{ \"query\" : \"FOR p IN products FILTER p.name = @name LIMIT 2 RETURN p.n\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"syntax error, unexpected assignment near '= @name LIMIT 2 RETURN p.n' at position 1:33\", \n  \"code\" : 400, \n  \"errorNum\" : 1501 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PostApiQueryProperties" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the query is valid, the server will respond with HTTP 200 and return the names of the bind parameters it found in the query (if any) in the bindVars attribute of the response. It will also return an array of the collections used in the query in the collections attribute. If a query can be parsed successfully, the ast attribute of the returned JSON will contain the abstract syntax tree representation of the query. The format of the ast is subject to change in future versions of ArangoDB, but it can be used to inspect how ArangoDB interprets a given query. Note that the abstract syntax tree will be returned without any optimizations applied to it.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request, or if the query contains a parse error. The body of the response will contain the error details embedded in a JSON object.
" + } + }, + "summary": " Parse an AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query-cache": { + "delete": { + "description": "\n\nclears the query cache", + "parameters": [], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the cache was cleared successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + } + }, + "summary": " Clears any results in the AQL query cache", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query-cache/properties": { + "get": { + "description": "\n\nReturns the global AQL query cache configuration. The configuration is a JSON object with the following properties:
  • mode: the mode the AQL query cache operates in. The mode is one of the following values: off, on or demand.
  • maxResults: the maximum number of query results that will be stored per database-specific cache.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the properties can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the global properties for the AQL query cache", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put": { + "description": "\n\nAfter the properties have been changed, the current set of properties will be returned in the HTTP response.
Note: changing the properties may invalidate all results in the cache. The global properties for the AQL query cache. The properties need to be passed in the attribute properties in the body of the HTTP request. properties needs to be a JSON object with the following properties:
**A json post document with these Properties is required:**
  • mode: the mode the AQL query cache should operate in. Possible values are off, on or demand.
  • maxResults: the maximum number of query results that will be stored per database-specific cache.

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PutApiQueryCacheProperties" + }, + "x-description-offset": 489 + } + ], + "responses": { + "200": { + "description": "Is returned if the properties were changed successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Globally adjusts the AQL query result cache properties", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/current": { + "get": { + "description": "\n\nReturns an array containing the AQL queries currently running in the selected database. Each query is a JSON object with the following attributes:
  • id: the query's id
  • query: the query string (potentially truncated)
  • started: the date and time when the query was started
  • runTime: the query's run time up to the point the list of queries was queried
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned when the list of queries can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the currently running AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/properties": { + "get": { + "description": "\n\nReturns the current query tracking configuration. The configuration is a JSON object with the following properties:
  • enabled: if set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
  • trackSlowQueries: if set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
  • maxSlowQueries: the maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
  • slowQueryThreshold: the threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
  • maxQueryStringLength: the maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if properties were retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the properties for the AQL query tracking", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • slowQueryThreshold: The threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
  • enabled: If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
  • maxSlowQueries: The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
  • trackSlowQueries: If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
  • maxQueryStringLength: The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
\n\nThe properties need to be passed in the attribute properties in the body of the HTTP request. properties needs to be a JSON object.
After the properties have been changed, the current set of properties will be returned in the HTTP response.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PutApiQueryProperties" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "Is returned if the properties were changed successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Changes the properties for the AQL query tracking", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/slow": { + "delete": { + "description": "\n\nClears the list of slow AQL queries
", + "parameters": [], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the list of queries was cleared successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + } + }, + "summary": " Clears the list of slow AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "get": { + "description": "\n\nReturns an array containing the last AQL queries that exceeded the slow query threshold in the selected database. The maximum amount of queries in the list can be controlled by setting the query tracking property `maxSlowQueries`. The threshold for treating a query as slow can be adjusted by setting the query tracking property `slowQueryThreshold`.
Each query is a JSON object with the following attributes:
  • id: the query's id
  • query: the query string (potentially truncated)
  • started: the date and time when the query was started
  • runTime: the query's run time up to the point the list of queries was queried
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned when the list of queries can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the list of slow AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/{query-id}": { + "delete": { + "description": "\n\nKills a running query. The query will be terminated at the next cancelation point.
", + "parameters": [ + { + "description": "The id of the query.
", + "format": "string", + "in": "path", + "name": "query-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the query was still running when the kill request was executed and the query's kill flag was set.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + }, + "404": { + "description": "The server will respond with HTTP 404 when no query with the specified id was found.
" + } + }, + "summary": " Kills a running AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/replication/applier-config": { + "get": { + "description": "\n\nReturns the configuration of the replication applier.
The body of the response is a JSON object with the configuration. The following attributes may be present in the configuration:
  • endpoint: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • database: the name of the database to connect to (e.g. \"_system\").
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • password: the password to use when connecting to the endpoint.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
  • autoStart: whether or not to auto-start the replication applier on (next and following) server starts
  • adaptivePolling: whether or not the replication applier will use adaptive polling.
  • includeSystem: whether or not system collection operations will be applied
  • requireFromPresent: if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • restrictType: the configuration for restrictCollections
  • restrictCollections: the optional array of collections to include or exclude, based on the setting of restrictType

Example:

shell> curl --dump - http://localhost:8529/_api/replication/applier-config\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"requestTimeout\" : 300, \n  \"connectTimeout\" : 10, \n  \"ignoreErrors\" : 0, \n  \"maxConnectRetries\" : 100, \n  \"sslProtocol\" : 0, \n  \"chunkSize\" : 0, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true, \n  \"includeSystem\" : true, \n  \"requireFromPresent\" : false, \n  \"verbose\" : false, \n  \"restrictType\" : \"\", \n  \"restrictCollections\" : [ ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return configuration of replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • database: the name of the database on the endpoint. If not specified, defaults to the current local database name.
  • restrictType: the configuration for restrictCollections; Has to be either include or exclude
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • requireFromPresent: if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • autoStart: whether or not to auto-start the replication applier on (next and following) server starts
  • adaptivePolling: if set to true, the replication applier will fall to sleep for an increasingly long period in case the logger server at the endpoint does not have any more replication events to apply. Using adaptive polling is thus useful to reduce the amount of work for both the applier and the logger server for cases when there are only infrequent changes. The downside is that when using adaptive polling, it might take longer for the replication applier to detect that there are new replication events on the logger server.
    Setting adaptivePolling to false will make the replication applier contact the logger server in a constant interval, regardless of whether the logger server provides updates frequently or seldom.
  • password: the password to use when connecting to the endpoint.
  • restrictCollections: the array of collections to include or exclude, based on the setting of restrictType of type string
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
\n\nSets the configuration of the replication applier. The configuration can only be changed while the applier is not running. The updated configuration will be saved immediately but only become active with the next start of the applier.
In case of success, the body of the response is a JSON object with the updated configuration.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/replication/applier-config <<EOF\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"username\" : \"replicationApplier\", \n  \"password\" : \"applier1234@foxx\", \n  \"chunkSize\" : 4194304, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\", \n  \"username\" : \"replicationApplier\", \n  \"requestTimeout\" : 300, \n  \"connectTimeout\" : 10, \n  \"ignoreErrors\" : 0, \n  \"maxConnectRetries\" : 100, \n  \"sslProtocol\" : 0, \n  \"chunkSize\" : 4194304, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true, \n  \"includeSystem\" : true, \n  \"requireFromPresent\" : false, \n  \"verbose\" : false, \n  \"restrictType\" : \"\", \n  \"restrictCollections\" : [ ] \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_applier_adjust" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed, or if the replication applier is currently running.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Adjust configuration of replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-start": { + "put": { + "description": "\n\nStarts the replication applier. This will return immediately if the replication applier is already running.
If the replication applier is not already running, the applier configuration will be checked, and if it is complete, the applier will be started in a background thread. This means that even if the applier will encounter any errors while running, they will not be reported in the response to this method.
To detect replication applier errors after the applier was started, use the /_api/replication/applier-state API instead.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/replication/applier-start\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:38:57Z\", \n      \"message\" : \"applier created\", \n      \"failedConnects\" : 0 \n    }, \n    \"totalRequests\" : 0, \n    \"totalFailedConnects\" : 0, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"errorNum\" : 0 \n    }, \n    \"time\" : \"2015-09-30T15:40:09Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [ + { + "description": "The remote lastLogTick value from which to start applying. If not specified, the last saved tick from the previous applier run is used. If there is no previous applier state saved, the applier will start at the beginning of the logger server's log.
", + "in": "query", + "name": "from", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the replication applier is not fully configured or the configuration is invalid.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Start replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-state": { + "get": { + "description": "\n\nReturns the state of the replication applier, regardless of whether the applier is currently running or not.
The response is a JSON object with the following attributes:
  • state: a JSON object with the following sub-attributes:
    - running: whether or not the applier is active and running
    - lastAppliedContinuousTick: the last tick value from the continuous replication log the applier has applied.
    - lastProcessedContinuousTick: the last tick value from the continuous replication log the applier has processed.
    Regularly, the last applied and last processed tick values should be identical. For transactional operations, the replication applier will first process incoming log events before applying them, so the processed tick value might be higher than the applied tick value. This will be the case until the applier encounters the transaction commit log event for the transaction.
    - lastAvailableContinuousTick: the last tick value the logger server can provide.
    - time: the time on the applier server.
    - totalRequests: the total number of requests the applier has made to the endpoint.
    - totalFailedConnects: the total number of failed connection attempts the applier has made.
    - totalEvents: the total number of log events the applier has processed.
    - totalOperationsExcluded: the total number of log events excluded because of restrictCollections.
    - progress: a JSON object with details about the replication applier progress. It contains the following sub-attributes if there is progress to report:
    - message: a textual description of the progress
    - time: the date and time the progress was logged
    - failedConnects: the current number of failed connection attempts
    - lastError: a JSON object with details about the last error that happened on the applier. It contains the following sub-attributes if there was an error:
    - errorNum: a numerical error code
    - errorMessage: a textual error description
    - time: the date and time the error occurred
    In case no error has occurred, lastError will be empty.
  • server: a JSON object with the following sub-attributes:
    - version: the applier server's version
    - serverId: the applier server's id
  • endpoint: the endpoint the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
  • database: the name of the database the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)

Example: Fetching the state of an inactive applier:

shell> curl --dump - http://localhost:8529/_api/replication/applier-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : false, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:09Z\", \n      \"message\" : \"applier shut down\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 1, \n    \"totalFailedConnects\" : 1, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"errorMessage\" : \"could not connect to master at tcp://127.0.0.1:8529: Could not connect to 'tcp://127.0.0.1:8529' 'connect() failed with #111 - Connection refused'\", \n      \"errorNum\" : 1412 \n    }, \n    \"time\" : \"2015-09-30T15:40:10Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
Example: Fetching the state of an active applier:

shell> curl --dump - http://localhost:8529/_api/replication/applier-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"message\" : \"fetching master state information\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 2, \n    \"totalFailedConnects\" : 2, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"errorNum\" : 0 \n    }, \n    \"time\" : \"2015-09-30T15:40:10Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " State of the replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-stop": { + "put": { + "description": "\n\nStops the replication applier. This will return immediately if the replication applier is not running.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/replication/applier-stop\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : false, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"message\" : \"applier shut down\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 3, \n    \"totalFailedConnects\" : 3, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"time\" : \"2015-09-30T15:40:11Z\", \n      \"errorMessage\" : \"could not connect to master at tcp://127.0.0.1:8529: Could not connect to 'tcp://127.0.0.1:8529' 'connect() failed with #111 - Connection refused'\", \n      \"errorNum\" : 1412 \n    }, \n    \"time\" : \"2015-09-30T15:40:11Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Stop replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/batch": { + "post": { + "description": "**A json post document with these Properties is required:**
  • ttl: the time-to-live for the new batch (in seconds)
    A JSON object with the batch configuration.
\n\nCreates a new dump batch and returns the batch's id.
The response is a JSON object with the following attributes:
  • id: the id of the batch
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_batch_replication" + }, + "x-description-offset": 59 + } + ], + "responses": { + "204": { + "description": "is returned if the batch was created successfully.
" + }, + "400": { + "description": "is returned if the ttl value is invalid or if DBserver attribute is not specified or illegal on a coordinator.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Create new dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/batch/{id}": { + "delete": { + "description": "\n\nDeletes the existing dump batch, allowing compaction and cleanup to resume.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "description": "The id of the batch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the batch was deleted successfully.
" + }, + "400": { + "description": "is returned if the batch was not found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Deletes an existing dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • ttl: the time-to-live for the new batch (in seconds)
\n\nExtends the ttl of an existing dump batch, using the batch's id and the provided ttl value.
If the batch's ttl can be extended successfully, the response is empty.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_batch_replication" + }, + "x-description-offset": 59 + }, + { + "description": "The id of the batch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the batch's ttl was extended successfully.
" + }, + "400": { + "description": "is returned if the ttl value is invalid or the batch was not found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Prolong existing dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/clusterInventory": { + "get": { + "description": "\n\nReturns the array of collections and indexes available on the cluster.
The response will be an array of JSON objects, one for each collection. Each collection contains exactly two keys \"parameters\" and \"indexes\". This information comes from Plan/Collections/{DB-Name}/* in the agency, just that the indexes attribute there is relocated to adjust it to the data format of arangodump.
", + "parameters": [ + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return cluster inventory of collections and indexes", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/dump": { + "get": { + "description": "\n\nReturns the data from the collection for the requested range.
When the from URL parameter is not used, collection events are returned from the beginning. When the from parameter is used, the result will only contain collection entries which have higher tick values than the specified from value (note: the log entry with a tick value equal to from will be excluded).
The to URL parameter can be used to optionally restrict the upper bound of the result to a certain tick value. If used, the result will only contain collection entries with tick values up to (including) to.
The chunkSize URL parameter can be used to control the size of the result. It must be specified in bytes. The chunkSize value will only be honored approximately. Otherwise a too low chunkSize value could cause the server to not be able to put just one entry into the result and return it. Therefore, the chunkSize value will only be consulted after an entry has been written into the result. If the result size is then bigger than chunkSize, the server will respond with as many entries as there are in the response already. If the result size is still smaller than chunkSize, the server will try to return more data if there's more data left to return.
If chunkSize is not specified, some server-side default value will be used.
The Content-Type of the result is application/x-arango-dump. This is an easy-to-process format, with all entries going onto separate lines in the response body.
Each line itself is a JSON object, with at least the following attributes:
  • tick: the operation's tick attribute
  • key: the key of the document/edge or the key used in the deletion operation
  • rev: the revision id of the document/edge or the deletion operation
  • data: the actual document/edge data for types 2300 and 2301. The full document/edge data will be returned even for updates.
  • type: the type of entry. Possible values for type are:
    - 2300: document insertion/update
    - 2301: edge insertion/update
    - 2302: document/edge deletion
Note: there will be no distinction between inserts and updates when calling this method.

Example: Empty collection:

shell> curl --dump - http://localhost:8529/_api/replication/dump?collection=testCollection\n\nHTTP/1.1 204 No Content\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-checkmore: false\nx-arango-replication-lastincluded: 0\n\n

\n
Example: Non-empty collection:

shell> curl --dump - http://localhost:8529/_api/replication/dump?collection=testCollection\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-checkmore: false\nx-arango-replication-lastincluded: 766695879\n\n\"{\\\"tick\\\":\\\"766040519\\\",\\\"type\\\":2300,\\\"key\\\":\\\"123456\\\",\\\"rev\\\":\\\"765974983\\\",\\\"data\\\":{\\\"_key\\\":\\\"123456\\\",\\\"_rev\\\":\\\"765974983\\\",\\\"c\\\":false,\\\"b\\\":1,\\\"d\\\":\\\"additional value\\\"}}\\n{\\\"tick\\\":\\\"766499271\\\",\\\"type\\\":2302,\\\"key\\\":\\\"foobar\\\",\\\"rev\\\":\\\"766433735\\\"}\\n{\\\"tick\\\":\\\"766695879\\\",\\\"type\\\":2302,\\\"key\\\":\\\"abcdef\\\",\\\"rev\\\":\\\"766630343\\\"}\\n\"\n

\n
", + "parameters": [ + { + "description": "The name or id of the collection to dump.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "Lower bound tick value for results.
", + "in": "query", + "name": "from", + "required": false, + "type": "number" + }, + { + "description": "Upper bound tick value for results.
", + "in": "query", + "name": "to", + "required": false, + "type": "number" + }, + { + "description": "Approximate maximum size of the returned result.
", + "in": "query", + "name": "chunkSize", + "required": false, + "type": "number" + }, + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to include tick values in the dump. The default value is true.
", + "in": "query", + "name": "ticks", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to flush the WAL before dumping. The default value is true.
", + "in": "query", + "name": "flush", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully and data was returned. The header `x-arango-replication-lastincluded` is set to the tick of the last document returned.
" + }, + "204": { + "description": "is returned if the request was executed successfully, but there was no content available. The header `x-arango-replication-lastincluded` is `0` in this case.
" + }, + "400": { + "description": "is returned if either the from or to values are invalid.
" + }, + "404": { + "description": "is returned when the collection could not be found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return data of a collection", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/inventory": { + "get": { + "description": "\n\nReturns the array of collections and indexes available on the server. This array can be used by replication clients to initiate an initial sync with the server.
The response will contain a JSON object with the collection and state and tick attributes.
collections is an array of collections with the following sub-attributes:
  • parameters: the collection properties
  • indexes: an array of the indexes of the collection. Primary indexes and edge indexes are not included in this array.
The state attribute contains the current state of the replication logger. It contains the following sub-attributes:
  • running: whether or not the replication logger is currently active. Note: since ArangoDB 2.2, the value will always be true
  • lastLogTick: the value of the last tick the replication logger has written
  • time: the current time on the server
Replication clients should note the lastLogTick value returned. They can then fetch collections' data using the dump method up to the value of lastLogTick, and query the continuous replication log for log events after this tick value.
To create a full copy of the collections on the server, a replication client can execute these steps:
  • call the /inventory API method. This returns the lastLogTick value and the array of collections and indexes from the server.
  • for each collection returned by /inventory, create the collection locally and call /dump to stream the collection data to the client, up to the value of lastLogTick. After that, the client can create the indexes on the collections as they were reported by /inventory.
If the clients wants to continuously stream replication log events from the logger server, the following additional steps need to be carried out:
  • the client should call /logger-follow initially to fetch the first batch of replication events that were logged after the client's call to /inventory.
    The call to /logger-follow should use a from parameter with the value of the lastLogTick as reported by /inventory. The call to /logger-follow will return the x-arango-replication-lastincluded which will contain the last tick value included in the response.
  • the client can then continuously call /logger-follow to incrementally fetch new replication events that occurred after the last transfer.
    Calls should use a from parameter with the value of the x-arango-replication-lastincluded header of the previous response. If there are no more replication events, the response will be empty and clients can go to sleep for a while and try again later.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.

Example:

shell> curl --dump - http://localhost:8529/_api/replication/inventory\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"7199175\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_apps\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"7461319\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"mount\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"4446663\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_aqlfunctions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2087367\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_graphs\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2218439\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_modules\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, 
\n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2349511\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 33554432, \n        \"name\" : \"_routing\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14145991\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_sessions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14866887\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_system_users_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"252359\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"580039\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"user\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : true \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, 
\n        \"cid\" : \"22206919\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"animals\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"21354951\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"demo\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    } \n  ], \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"767351239\", \n    \"totalEvents\" : 4726, \n    \"time\" : \"2015-09-30T15:40:13Z\" \n  }, \n  \"tick\" : \"767351239\" \n}\n

\n
Example: With some additional indexes:

shell> curl --dump - http://localhost:8529/_api/replication/inventory\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"7199175\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_apps\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"7461319\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"mount\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"4446663\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_aqlfunctions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2087367\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_graphs\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2218439\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_modules\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, 
\n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2349511\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 33554432, \n        \"name\" : \"_routing\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14145991\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_sessions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14866887\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_system_users_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"252359\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"580039\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"user\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : true \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, 
\n        \"cid\" : \"22206919\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"animals\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"21354951\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"demo\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"767416775\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"IndexedCollection1\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"767678919\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"name\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : false, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"768006599\", \n          \"type\" : \"skiplist\", \n          \"fields\" : [ \n            \"a\", \n            \"b\" \n          ], \n          \"unique\" : true, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"768203207\", \n          \"type\" : \"cap\", \n          \"size\" : 500, \n          \"byteSize\" : 0, \n          \"unique\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"768399815\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : 
false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"IndexedCollection2\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"768596423\", \n          \"type\" : \"fulltext\", \n          \"fields\" : [ \n            \"text\" \n          ], \n          \"unique\" : false, \n          \"sparse\" : true, \n          \"minLength\" : 10 \n        }, \n        { \n          \"id\" : \"768924103\", \n          \"type\" : \"skiplist\", \n          \"fields\" : [ \n            \"a\" \n          ], \n          \"unique\" : false, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"769120711\", \n          \"type\" : \"cap\", \n          \"size\" : 0, \n          \"byteSize\" : 1048576, \n          \"unique\" : false \n        } \n      ] \n    } \n  ], \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"767351239\", \n    \"totalEvents\" : 4739, \n    \"time\" : \"2015-09-30T15:40:13Z\" \n  }, \n  \"tick\" : \"769251783\" \n}\n

\n
", + "parameters": [ + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return inventory of collections and indexes", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-first-tick": { + "get": { + "description": "\n\nReturns the first available tick value that can be served from the server's replication log. This method can be called by replication clients after to determine if certain data (identified by a tick value) is still available for replication.
The result is a JSON object containing the attribute firstTick. This attribute contains the minimum tick value available in the server's replication log.
Note: this method is not supported on a coordinator in a cluster.

Example: Returning the first available tick

shell> curl --dump - http://localhost:8529/_api/replication/logger-first-tick\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n\"{\\\"firstTick\\\":\\\"383431\\\"}\"\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Returns the first available tick value", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-follow": { + "get": { + "description": "\n\nReturns data from the server's replication log. This method can be called by replication clients after an initial synchronization of data. The method will return all \"recent\" log entries from the logger server, and the clients can replay and apply these entries locally so they get to the same data state as the logger server.
Clients can call this method repeatedly to incrementally fetch all changes from the logger server. In this case, they should provide the from value so they will only get returned the log events since their last fetch.
When the from URL parameter is not used, the logger server will return log entries starting at the beginning of its replication log. When the from parameter is used, the logger server will only return log entries which have higher tick values than the specified from value (note: the log entry with a tick value equal to from will be excluded). Use the from value when incrementally fetching log data.
The to URL parameter can be used to optionally restrict the upper bound of the result to a certain tick value. If used, the result will contain only log events with tick values up to (including) to. In incremental fetching, there is no need to use the to parameter. It only makes sense in special situations, when only parts of the change log are required.
The chunkSize URL parameter can be used to control the size of the result. It must be specified in bytes. The chunkSize value will only be honored approximately. Otherwise a too low chunkSize value could cause the server to not be able to put just one log entry into the result and return it. Therefore, the chunkSize value will only be consulted after a log entry has been written into the result. If the result size is then bigger than chunkSize, the server will respond with as many log entries as there are in the response already. If the result size is still smaller than chunkSize, the server will try to return more data if there's more data left to return.
If chunkSize is not specified, some server-side default value will be used.
The Content-Type of the result is application/x-arango-dump. This is an easy-to-process format, with all log events going onto separate lines in the response body. Each log event itself is a JSON object, with at least the following attributes:
  • tick: the log event tick value
  • type: the log event type
Individual log events will also have additional attributes, depending on the event type. A few common attributes which are used for multiple event types are:
  • cid: id of the collection the event was for
  • tid: id of the transaction the event was contained in
  • key: document key
  • rev: document revision id
  • data: the original document data
A more detailed description of the individual replication event types and their data structures can be found in the manual.
The response will also contain the following HTTP headers:
  • x-arango-replication-active: whether or not the logger is active. Clients can use this flag as an indication for their polling frequency. If the logger is not active and there are no more replication events available, it might be sensible for a client to abort, or to go to sleep for a long time and try again later to check whether the logger has been activated.
  • x-arango-replication-lastincluded: the tick value of the last included value in the result. In incremental log fetching, this value can be used as the from value for the following request. Note that if the result is empty, the value will be 0. This value should not be used as from value by clients in the next request (otherwise the server would return the log events from the start of the log again).
  • x-arango-replication-lasttick: the last tick value the logger server has logged (not necessarily included in the result). By comparing the last tick and last included tick values, clients have an approximate indication of how many events there are still left to fetch.
  • x-arango-replication-checkmore: whether or not there already exists more log data which the client could fetch immediately. If there is more log data available, the client could call logger-follow again with an adjusted from value to fetch remaining log entries until there are no more.
    If there isn't any more log data to fetch, the client might decide to go to sleep for a while before calling the logger again.
Note: this method is not supported on a coordinator in a cluster.

Example: No log events available

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=770628039\n\nHTTP/1.1 204 No Content\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: false\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 0\nx-arango-replication-lasttick: 770628039\n\n

\n
Example: A few log events

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=770628039\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: false\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 771873223\nx-arango-replication-lasttick: 771873223\n\n\"{\\\"tick\\\":\\\"770759111\\\",\\\"type\\\":2000,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"collection\\\":{\\\"version\\\":5,\\\"type\\\":2,\\\"cid\\\":\\\"770693575\\\",\\\"indexBuckets\\\":8,\\\"deleted\\\":false,\\\"doCompact\\\":true,\\\"maximalSize\\\":1048576,\\\"name\\\":\\\"products\\\",\\\"isVolatile\\\":false,\\\"waitForSync\\\":false}}\\n{\\\"tick\\\":\\\"771086791\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"771021255\\\",\\\"data\\\":{\\\"_key\\\":\\\"p1\\\",\\\"_rev\\\":\\\"771021255\\\",\\\"name\\\":\\\"flux compensator\\\"}}\\n{\\\"tick\\\":\\\"771414471\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p2\\\",\\\"rev\\\":\\\"771348935\\\",\\\"data\\\":{\\\"_key\\\":\\\"p2\\\",\\\"_rev\\\":\\\"771348935\\\",\\\"hp\\\":5100,\\\"name\\\":\\\"hybrid hovercraft\\\"}}\\n{\\\"tick\\\":\\\"771611079\\\",\\\"type\\\":2302,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"771545543\\\"}\\n{\\\"tick\\\":\\\"771807687\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p2\\\",\\\"rev\\\":\\\"771742151\\\",\\\"data\\\":{\\\"_key\\\":\\\"p2\\\",\\\"_rev\\\":\\\"771742151\\\"}}\\n{\\\"tick\\\":\\\"771873223\\\",\\\"type\\\":2001,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\"}\\n\"\n

\n
Example: More events than would fit into the response

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=769317319&chunkSize=400\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: true\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 769841607\nx-arango-replication-lasttick: 770628039\n\n\"{\\\"tick\\\":\\\"769382855\\\",\\\"type\\\":2001,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"768399815\\\"}\\n{\\\"tick\\\":\\\"769513927\\\",\\\"type\\\":2000,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"769448391\\\",\\\"collection\\\":{\\\"version\\\":5,\\\"type\\\":2,\\\"cid\\\":\\\"769448391\\\",\\\"indexBuckets\\\":8,\\\"deleted\\\":false,\\\"doCompact\\\":true,\\\"maximalSize\\\":1048576,\\\"name\\\":\\\"products\\\",\\\"isVolatile\\\":false,\\\"waitForSync\\\":false}}\\n{\\\"tick\\\":\\\"769841607\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"769448391\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"769776071\\\",\\\"data\\\":{\\\"_key\\\":\\\"p1\\\",\\\"_rev\\\":\\\"769776071\\\",\\\"name\\\":\\\"flux compensator\\\"}}\\n\"\n

\n
", + "parameters": [ + { + "description": "Lower bound tick value for results.
", + "in": "query", + "name": "from", + "required": false, + "type": "number" + }, + { + "description": "Upper bound tick value for results.
", + "in": "query", + "name": "to", + "required": false, + "type": "number" + }, + { + "description": "Approximate maximum size of the returned result.
", + "in": "query", + "name": "chunkSize", + "required": false, + "type": "number" + }, + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully, and there are log events available for the requested range. The response body will not be empty in this case.
" + }, + "204": { + "description": "is returned if the request was executed successfully, but there are no log events available for the requested range. The response body will be empty in this case.
" + }, + "400": { + "description": "is returned if either the from or to values are invalid.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Returns log entries", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-state": { + "get": { + "description": "\n\nReturns the current state of the server's replication logger. The state will include information about whether the logger is running and about the last logged tick value. This tick value is important for incremental fetching of data.
The body of the response contains a JSON object with the following attributes:
  • state: the current logger state as a JSON object with the following sub-attributes:
    - running: whether or not the logger is running
    - lastLogTick: the tick value of the latest tick the logger has logged. This value can be used for incremental fetching of log data.
    - totalEvents: total number of events logged since the server was started. The value is not reset between multiple stops and re-starts of the logger.
    - time: the current date and time on the logger server
  • server: a JSON object with the following sub-attributes:
    - version: the logger server's version
    - serverId: the logger server's id
  • clients: returns the last fetch status by replication clients connected to the logger. Each client is returned as a JSON object with the following attributes:
    - serverId: server id of client
    - lastServedTick: last tick value served to this client via the logger-follow API
    - time: date and time when this client last called the logger-follow API

Example: Returns the state of the replication logger.

shell> curl --dump - http://localhost:8529/_api/replication/logger-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"771873223\", \n    \"totalEvents\" : 4761, \n    \"time\" : \"2015-09-30T15:40:17Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"clients\" : [ ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the logger state could be determined successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if the logger state could not be determined.
" + } + }, + "summary": " Return replication logger state", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-tick-ranges": { + "get": { + "description": "\n\nReturns the currently available ranges of tick values for all currently available WAL logfiles. The tick values can be used to determine if certain data (identified by tick value) are still available for replication.
The body of the response contains a JSON array. Each array member is an object that describes a single logfile. Each object has the following attributes:
  • datafile: name of the logfile
  • status: status of the datafile, in textual form (e.g. \"sealed\", \"open\")
  • tickMin: minimum tick value contained in logfile
  • tickMax: maximum tick value contained in logfile

Example: Returns the available tick ranges.

shell> curl --dump - http://localhost:8529/_api/replication/logger-tick-ranges\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-186823.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"383431\", \n    \"tickMax\" : \"642505159\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-642636231.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"642963911\", \n    \"tickMax\" : \"645716423\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-645847495.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"645978567\", \n    \"tickMax\" : \"766695879\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-766826951.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"766958023\", \n    \"tickMax\" : \"767089095\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-767220167.db\", \n    \"status\" : \"open\", \n    \"tickMin\" : \"767351239\", \n    \"tickMax\" : \"771873223\" \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the tick ranges could be determined successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if the logger state could not be determined.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Return the tick ranges available in the WAL logfiles", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/make-slave": { + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the master.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • database: the database name on the master (if not specified, defaults to the name of the local current database).
  • requireFromPresent: if set to true, then the replication applier will check at start of its continuous replication if the start tick from the dump phase is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • restrictType: an optional string value for collection filtering. When specified, the allowed values are include or exclude.
  • restrictCollections: an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be synchronized. If restrictType is exclude, all but the specified collections will be synchronized. of type string
  • adaptivePolling: whether or not the replication applier will use adaptive polling.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • password: the password to use when connecting to the master.
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
\n\nStarts a full data synchronization from a remote endpoint into the local ArangoDB database and afterwards starts the continuous replication. The operation works on a per-database level.
All local database data will be removed prior to the synchronization.
In case of success, the body of the response is a JSON object with the following attributes:
  • state: a JSON object with the following sub-attributes:
    - running: whether or not the applier is active and running
    - lastAppliedContinuousTick: the last tick value from the continuous replication log the applier has applied.
    - lastProcessedContinuousTick: the last tick value from the continuous replication log the applier has processed.
    Regularly, the last applied and last processed tick values should be identical. For transactional operations, the replication applier will first process incoming log events before applying them, so the processed tick value might be higher than the applied tick value. This will be the case until the applier encounters the transaction commit log event for the transaction.
    - lastAvailableContinuousTick: the last tick value the logger server can provide.
    - time: the time on the applier server.
    - totalRequests: the total number of requests the applier has made to the endpoint.
    - totalFailedConnects: the total number of failed connection attempts the applier has made.
    - totalEvents: the total number of log events the applier has processed.
    - totalOperationsExcluded: the total number of log events excluded because of restrictCollections.
    - progress: a JSON object with details about the replication applier progress. It contains the following sub-attributes if there is progress to report:
    - message: a textual description of the progress
    - time: the date and time the progress was logged
    - failedConnects: the current number of failed connection attempts
    - lastError: a JSON object with details about the last error that happened on the applier. It contains the following sub-attributes if there was an error:
    - errorNum: a numerical error code
    - errorMessage: a textual error description
    - time: the date and time the error occurred
    In case no error has occurred, lastError will be empty.
  • server: a JSON object with the following sub-attributes:
    - version: the applier server's version
    - serverId: the applier server's id
  • endpoint: the endpoint the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
  • database: the name of the database the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
WARNING: calling this method will synchronize data from the collections found on the remote master to the local ArangoDB database. All data in the local collections will be purged and replaced with data from the master.
Use with caution!
Please also keep in mind that this command may take a long time to complete and return. This is because it will first do a full data synchronization with the master, which will take time roughly proportional to the amount of data.
Note: this method is not supported on a coordinator in a cluster.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_makeSlave" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred during sychronization or when starting the continuous replication.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Turn the server into a slave of another", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/server-id": { + "get": { + "description": "\n\nReturns the servers id. The id is also returned by other replication API methods, and this method is an easy means of determining a server's id.
The body of the response is a JSON object with the attribute serverId. The server id is returned as a string.

Example:

shell> curl --dump - http://localhost:8529/_api/replication/server-id\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"serverId\" : \"4865533481307\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return server id", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/sync": { + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • database: the database name on the master (if not specified, defaults to the name of the local current database).
  • restrictType: an optional string value for collection filtering. When specified, the allowed values are include or exclude.
  • incremental: if set to true, then an incremental synchronization method will be used for synchronizing data in collections. This method is useful when collections already exist locally, and only the remaining differences need to be transferred from the remote endpoint. In this case, the incremental synchronization can be faster than a full synchronization. The default value is false, meaning that the complete data from the remote collection will be transferred.
  • restrictCollections: an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be synchronized. If restrictType is exclude, all but the specified collections will be synchronized. of type string
  • password: the password to use when connecting to the endpoint.
\n\nStarts a full data synchronization from a remote endpoint into the local ArangoDB database.
The sync method can be used by replication clients to connect an ArangoDB database to a remote endpoint, fetch the remote list of collections and indexes, and collection data. It will thus create a local backup of the state of data at the remote ArangoDB database. sync works on a per-database level.
sync will first fetch the list of collections and indexes from the remote endpoint. It does so by calling the inventory API of the remote database. It will then purge data in the local ArangoDB database, and afterwards will transfer collection data from the remote database to the local ArangoDB database. It will extract data from the remote database by calling the remote database's dump API until all data are fetched.
In case of success, the body of the response is a JSON object with the following attributes:
  • collections: an array of collections that were transferred from the endpoint
  • lastLogTick: the last log tick on the endpoint at the time the transfer was started. Use this value as the from value when starting the continuous synchronization later.
WARNING: calling this method will synchronize data from the collections found on the remote endpoint to the local ArangoDB database. All data in the local collections will be purged and replaced with data from the endpoint.
Use with caution!
Note: this method is not supported on a coordinator in a cluster.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_synchronize" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred during sychronization.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Synchronize data from a remote endpoint", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/simple/all": { + "put": { + "description": "free style json body\n\n
Returns all documents of a collection. The call expects a JSON object as body with the following attributes:
  • collection: The name of the collection to query.
  • skip: The number of documents to skip in the query (optional).
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example: Limit the amount of documents using limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF\n{ \"collection\": \"products\", \"skip\": 2, \"limit\" : 2 }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"Hello3\" : \"World3\", \n      \"_id\" : \"products/774887879\", \n      \"_rev\" : \"774887879\", \n      \"_key\" : \"774887879\" \n    }, \n    { \n      \"Hello4\" : \"World4\", \n      \"_id\" : \"products/775215559\", \n      \"_rev\" : \"775215559\", \n      \"_key\" : \"775215559\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Using a batchSize value

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF\n{ \"collection\": \"products\", \"batchSize\" : 3 }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"Hello2\" : \"World2\", \n      \"_id\" : \"products/772594119\", \n      \"_rev\" : \"772594119\", \n      \"_key\" : \"772594119\" \n    }, \n    { \n      \"Hello1\" : \"World1\", \n      \"_id\" : \"products/772266439\", \n      \"_rev\" : \"772266439\", \n      \"_key\" : \"772266439\" \n    }, \n    { \n      \"Hello5\" : \"World5\", \n      \"_id\" : \"products/773577159\", \n      \"_rev\" : \"773577159\", \n      \"_key\" : \"773577159\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"773773767\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "Contains the query.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Return all documents", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/any": { + "put": { + "description": "\n\n
Returns a random document from a collection. The call expects a JSON object as body with the following attributes:
**A json post document with these Properties is required:**
  • collection: The identifier or name of the collection to query.
    Returns a JSON object with the document stored in the attribute document if the collection contains at least one document. If the collection is empty, the document attribute contains null.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/any <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"document\" : { \n    \"_id\" : \"products/776460743\", \n    \"_key\" : \"776460743\", \n    \"_rev\" : \"776460743\", \n    \"Hello2\" : \"World2\" \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_any" + }, + "x-description-offset": 185 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Return a random document", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • skip: The number of documents to skip in the query (optional).
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • example: The example document.
  • collection: The name of the collection to query.
\n\n
This will find all documents matching a given example.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example: Matching an attribute

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"i\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/779082183\", \n      \"_key\" : \"779082183\", \n      \"_rev\" : \"779082183\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 2, \n        \"j\" : 2 \n      } \n    }, \n    { \n      \"_id\" : \"products/778295751\", \n      \"_key\" : \"778295751\", \n      \"_rev\" : \"778295751\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/778885575\", \n      \"_key\" : \"778885575\", \n      \"_rev\" : \"778885575\", \n      \"i\" : 1 \n    }, \n    { \n      \"_id\" : \"products/778623431\", \n      \"_key\" : \"778623431\", \n      \"_rev\" : \"778623431\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 4, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Matching an attribute which is a sub-document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a.j\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/780589511\", \n      \"_key\" : \"780589511\", \n      \"_rev\" : \"780589511\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/780261831\", \n      \"_key\" : \"780261831\", \n      \"_rev\" : \"780261831\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Matching an attribute within a sub-document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/782555591\", \n      \"_key\" : \"782555591\", \n      \"_rev\" : \"782555591\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 1, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Simple query by-example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/first": { + "put": { + "description": "**A json post document with these Properties is required:**
  • count: the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
  • collection: the name of the collection
\n\n
This will return the first document(s) from the collection, in the order of insertion/update time. When the count argument is supplied, the result will be an array of documents, with the \"oldest\" document being first in the result array. If the count argument is not supplied, the result is the \"oldest\" document of the collection, or null if the collection is empty.
Note: this method is not supported for sharded collections with more than one shard.

Example: Retrieving the first n documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first <<EOF\n{ \n  \"collection\" : \"products\", \n  \"count\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/784193991\", \n      \"_key\" : \"784193991\", \n      \"_rev\" : \"784193991\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/784521671\", \n      \"_key\" : \"784521671\", \n      \"_rev\" : \"784521671\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the first document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"_id\" : \"products/789633479\", \n    \"_key\" : \"789633479\", \n    \"_rev\" : \"789633479\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 1, \n      \"j\" : 1 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_first" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " First document of a collection", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/first-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • example: The example document.
  • collection: The name of the collection to query.
\n\n
This will return the first document matching a given example.
Returns a result containing the document or HTTP 404 if no document matched the example.
If more than one document in the collection matches the specified example, only one of these documents will be returned, and it is undefined which of the matching documents is returned.

Example: If a matching document was found

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"i\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"document\" : { \n    \"_id\" : \"products/786618823\", \n    \"_key\" : \"786618823\", \n    \"_rev\" : \"786618823\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 2, \n      \"j\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: If no document was found

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"l\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 404, \n  \"errorMessage\" : \"no match\" \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_first_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Find documents matching an example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/fulltext": { + "put": { + "description": "**A json post document with these Properties is required:**
  • index: The identifier of the fulltext-index to use.
  • attribute: The attribute that contains the texts.
  • collection: The name of the collection to query.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • skip: The number of documents to skip in the query (optional).
  • query: The fulltext query. Please refer to [Fulltext queries](../SimpleQueries/FulltextQueries.html) for details.
\n\n
This will find all documents from the collection that match the fulltext query specified in query.
In order to use the fulltext operator, a fulltext index must be defined for the collection and the specified attribute.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the fulltext simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the fulltext operator is to issue an AQL query using the FULLTEXT [AQL function](../Aql/FulltextFunctions.md) as follows:

FOR doc IN FULLTEXT(@@collection, @attributeName, @queryString, @limit) RETURN doc

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/fulltext <<EOF\n{ \n  \"collection\" : \"products\", \n  \"attribute\" : \"text\", \n  \"query\" : \"word\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/791009735\", \n      \"_key\" : \"791009735\", \n      \"_rev\" : \"791009735\", \n      \"text\" : \"this text contains word\" \n    }, \n    { \n      \"_id\" : \"products/791206343\", \n      \"_key\" : \"791206343\", \n      \"_rev\" : \"791206343\", \n      \"text\" : \"this text also has a word\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_fulltext" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Fulltext index query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/last": { + "put": { + "description": "**A json post document with these Properties is required:**
  • count: the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
  • collection: the name of the collection
\n\n
This will return the last document(s) from the collection, in the order of insertion/update time. When the count argument is supplied, the result will be an array of documents, with the \"latest\" document being first in the result array.
If the count argument is not supplied, the result is the \"latest\" document of the collection, or null if the collection is empty.
Note: this method is not supported for sharded collections with more than one shard.

Example: Retrieving the last n documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/last <<EOF\n{ \n  \"collection\" : \"products\", \n  \"count\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/793369031\", \n      \"_key\" : \"793369031\", \n      \"_rev\" : \"793369031\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 2, \n        \"j\" : 2 \n      } \n    }, \n    { \n      \"_id\" : \"products/793172423\", \n      \"_key\" : \"793172423\", \n      \"_rev\" : \"793172423\", \n      \"i\" : 1 \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the last document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/last <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"_id\" : \"products/795007431\", \n    \"_key\" : \"795007431\", \n    \"_rev\" : \"795007431\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 2, \n      \"j\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_last" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Last document of a collection", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/lookup-by-keys": { + "put": { + "description": "**A json post document with these Properties is required:**
  • keys: array with the _keys of documents to look up. of type string
  • collection: The name of the collection to look in for the documents
\n\nLooks up the documents in the specified collection using the array of keys provided. All documents for which a matching key was specified in the keys array and that exist in the collection will be returned. Keys for which no document can be found in the underlying collection are ignored, and no exception will be thrown for them.
The body of the response contains a JSON object with a documents attribute. The documents attribute is an array containing the matching documents. The order in which matching documents are present in the result array is unspecified.

Example: Looking up existing documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"test0\", \n    \"test1\", \n    \"test2\", \n    \"test3\", \n    \"test4\", \n    \"test5\", \n    \"test6\", \n    \"test7\", \n    \"test8\", \n    \"test9\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    { \n      \"value\" : 0, \n      \"_id\" : \"test/test0\", \n      \"_rev\" : \"795597255\", \n      \"_key\" : \"test0\" \n    }, \n    { \n      \"value\" : 1, \n      \"_id\" : \"test/test1\", \n      \"_rev\" : \"795793863\", \n      \"_key\" : \"test1\" \n    }, \n    { \n      \"value\" : 2, \n      \"_id\" : \"test/test2\", \n      \"_rev\" : \"795990471\", \n      \"_key\" : \"test2\" \n    }, \n    { \n      \"value\" : 3, \n      \"_id\" : \"test/test3\", \n      \"_rev\" : \"796187079\", \n      \"_key\" : \"test3\" \n    }, \n    { \n      \"value\" : 4, \n      \"_id\" : \"test/test4\", \n      \"_rev\" : \"796383687\", \n      \"_key\" : \"test4\" \n    }, \n    { \n      \"value\" : 5, \n      \"_id\" : \"test/test5\", \n      \"_rev\" : \"796580295\", \n      \"_key\" : \"test5\" \n    }, \n    { \n      \"value\" : 6, \n      \"_id\" : \"test/test6\", \n      \"_rev\" : \"796776903\", \n      \"_key\" : \"test6\" \n    }, \n    { \n      \"value\" : 7, \n      \"_id\" : \"test/test7\", \n      \"_rev\" : \"796973511\", \n      \"_key\" : \"test7\" \n    }, \n    { \n      \"value\" : 8, \n      \"_id\" : \"test/test8\", \n      \"_rev\" : \"797170119\", \n      \"_key\" : \"test8\" \n    }, \n    { \n      \"value\" : 9, \n      \"_id\" : \"test/test9\", \n      \"_rev\" : \"797366727\", \n      \"_key\" : \"test9\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Looking up non-existing documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"foo\", \n    \"bar\", \n    \"baz\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/RestLookupByKeys" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the operation was carried out successfully.
" + }, + "404": { + "description": "is returned if the collection was not found. The response body contains an error document in this case.
" + }, + "405": { + "description": "is returned if the operation was called with a different HTTP METHOD than PUT.
" + } + }, + "summary": " Find documents by their keys", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/near": { + "put": { + "description": "**A json post document with these Properties is required:**
  • distance: If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
  • skip: The number of documents to skip in the query. (optional)
  • longitude: The longitude of the coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • collection: The name of the collection to query.
  • latitude: The latitude of the coordinate.
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
The default will find at most 100 documents near the given coordinate. The returned array is sorted according to the distance, with the nearest document being first in the return array. If there are near documents of equal distance, documents are chosen randomly from this set until the limit is reached.
In order to use the near operator, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.

Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the near simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the near operator is to issue an [AQL query](../Aql/GeoFunctions.md) using the NEAR function as follows:

FOR doc IN NEAR(@@collection, @latitude, @longitude, @limit) RETURN doc

Example: Without distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/801823175\", \n      \"_key\" : \"801823175\", \n      \"_rev\" : \"801823175\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/801429959\", \n      \"_key\" : \"801429959\", \n      \"_rev\" : \"801429959\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: With distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 3, \n  \"distance\" : \"distance\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/804444615\", \n      \"_key\" : \"804444615\", \n      \"_rev\" : \"804444615\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/804837831\", \n      \"_key\" : \"804837831\", \n      \"_rev\" : \"804837831\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/804248007\", \n      \"_key\" : \"804248007\", \n      \"_rev\" : \"804248007\", \n      \"name\" : \"Name/-0.004/\", \n      \"loc\" : [ \n        -0.004, \n        0 \n      ], \n      \"distance\" : 444.779706578235 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 3, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_near" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Returns documents near a coordinate", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/range": { + "put": { + "description": "**A json post document with these Properties is required:**
  • right: The upper bound.
  • attribute: The attribute path to check.
  • collection: The name of the collection to query.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • closed: If true, use interval including left and right, otherwise exclude right, but include left.
  • skip: The number of documents to skip in the query (optional).
  • left: The lower bound.
\n\n
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the range simple query is deprecated as of ArangoDB 2.6. The function may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection within a specific range is to use an AQL query as follows:

FOR doc IN @@collection FILTER doc.value >= @left && doc.value < @right LIMIT @skip, @limit RETURN doc

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/range <<EOF\n{ \n  \"collection\" : \"products\", \n  \"attribute\" : \"i\", \n  \"left\" : 2, \n  \"right\" : 4 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/806738375\", \n      \"_key\" : \"806738375\", \n      \"_rev\" : \"806738375\", \n      \"i\" : 2 \n    }, \n    { \n      \"_id\" : \"products/806934983\", \n      \"_key\" : \"806934983\", \n      \"_rev\" : \"806934983\", \n      \"i\" : 3 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_range" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown or no suitable index for the range query is present. The response body contains an error document in this case.
" + } + }, + "summary": " Simple range query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/remove-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to remove from.
  • options: a json object which can contain the following attributes:
    • limit: an optional value that determines how many documents to delete at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be deleted.
    • waitForSync: if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
\n\n
This will find all documents in the collection that match the specified example object.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were deleted.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using Parameter: waitForSync and limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"waitForSync\" : true, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using Parameter: waitForSync and limit with new signature

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"options\" : { \n    \"waitForSync\" : true, \n    \"limit\" : 2 \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_remove_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Remove documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/remove-by-keys": { + "put": { + "description": "**A json post document with these Properties is required:**
  • keys: array with the _keys of documents to remove. of type string
  • options: a json object which can contain the following attributes:
    • waitForSync: if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • collection: The name of the collection to look in for the documents to remove
\n\nLooks up the documents in the specified collection using the array of keys provided, and removes all documents from the collection whose keys are contained in the keys array. Keys for which no document can be found in the underlying collection are ignored, and no exception will be thrown for them.
The body of the response contains a JSON object with information how many documents were removed (and how many were not). The removed attribute will contain the number of actually removed documents. The ignored attribute will contain the number of keys in the request for which no matching document could be found.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"test0\", \n    \"test1\", \n    \"test2\", \n    \"test3\", \n    \"test4\", \n    \"test5\", \n    \"test6\", \n    \"test7\", \n    \"test8\", \n    \"test9\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"removed\" : 10, \n  \"ignored\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"foo\", \n    \"bar\", \n    \"baz\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"removed\" : 0, \n  \"ignored\" : 3, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/RestRemoveByKeys" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the operation was carried out successfully. The number of removed documents may still be 0 in this case if none of the specified document keys were found in the collection.
" + }, + "404": { + "description": "is returned if the collection was not found. The response body contains an error document in this case.
" + }, + "405": { + "description": "is returned if the operation was called with a different HTTP METHOD than PUT.
" + } + }, + "summary": " Remove documents by their keys", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/replace-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • options: a json object which can contain the following attributes
    • limit: an optional value that determines how many documents to replace at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be replaced.

    • waitForSync: if set to true, then all replacement operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to replace within.
  • newValue: The replacement document that will get inserted in place of the \"old\" documents.
\n\n
This will find all documents in the collection that match the specified example object, and replace the entire document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were replaced.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"foo\" : \"bar\" \n  }, \n  \"limit\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"replaced\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using new Signature for attributes WaitForSync and limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"foo\" : \"bar\" \n  }, \n  \"options\" : { \n    \"limit\" : 3, \n    \"waitForSync\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"replaced\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_replace_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Replace documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/update-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • options: a json object which can contain the following attributes:
    • keepNull: This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document.
    • limit: an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated.
    • waitForSync: if set to true, then all update operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to update within.
  • newValue: A document containing all the attributes to update in the found documents.
\n\n
This will find all documents in the collection that match the specified example object, and partially update the document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were updated.


Example: using old syntax for options

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"a\" : { \n      \"j\" : 22 \n    } \n  }, \n  \"limit\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"updated\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: using new signature for options

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"a\" : { \n      \"j\" : 22 \n    } \n  }, \n  \"options\" : { \n    \"limit\" : 3, \n    \"waitForSync\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"updated\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_update_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the collection was updated successfully and waitForSync was true.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Update documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/within": { + "put": { + "description": "**A json post document with these Properties is required:**
  • distance: If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
  • skip: The number of documents to skip in the query. (optional)
  • longitude: The longitude of the coordinate.
  • radius: The maximal radius (in meters).
  • collection: The name of the collection to query.
  • latitude: The latitude of the coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
This will find all documents within a given radius around the coordinate (latitude, longitude). The returned list is sorted by distance.
In order to use the within operator, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.

Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the within simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the within operator is to issue an [AQL query](../Aql/GeoFunctions.md) using the WITHIN function as follows:

FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius, @distanceAttributeName) RETURN doc

Example: Without distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 2, \n  \"radius\" : 500 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/829610439\", \n      \"_key\" : \"829610439\", \n      \"_rev\" : \"829610439\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/829217223\", \n      \"_key\" : \"829217223\", \n      \"_rev\" : \"829217223\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: With distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 3, \n  \"distance\" : \"distance\", \n  \"radius\" : 300 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/832231879\", \n      \"_key\" : \"832231879\", \n      \"_rev\" : \"832231879\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/832625095\", \n      \"_key\" : \"832625095\", \n      \"_rev\" : \"832625095\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/832035271\", \n      \"_key\" : \"832035271\", \n      \"_rev\" : \"832035271\", \n      \"name\" : \"Name/-0.004/\", \n      \"loc\" : [ \n        -0.004, \n        0 \n      ], \n      \"distance\" : 444.779706578235 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 3, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_within" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Find documents within a radius around a coordinate", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/within-rectangle": { + "put": { + "description": "**A json post document with these Properties is required:**
  • latitude1: The latitude of the first rectangle coordinate.
  • skip: The number of documents to skip in the query. (optional)
  • latitude2: The latitude of the second rectangle coordinate.
  • longitude2: The longitude of the second rectangle coordinate.
  • longitude1: The longitude of the first rectangle coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • collection: The name of the collection to query.
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
This will find all documents within the specified rectangle, determined by the given coordinates (latitude1, longitude1, latitude2, longitude2).
In order to use the within-rectangle query, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/within-rectangle <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude1\" : 0, \n  \"longitude1\" : 0, \n  \"latitude2\" : 0.2, \n  \"longitude2\" : 0.2, \n  \"skip\" : 1, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/836229575\", \n      \"_key\" : \"836229575\", \n      \"_rev\" : \"836229575\", \n      \"name\" : \"Name/0.008/\", \n      \"loc\" : [ \n        0.008, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/836032967\", \n      \"_key\" : \"836032967\", \n      \"_rev\" : \"836032967\", \n      \"name\" : \"Name/0.006/\", \n      \"loc\" : [ \n        0.006, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_within_rectangle" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Within rectangle query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/tasks": { + "post": { + "description": "**A json post document with these Properties is required:**
  • params: The parameters to be passed into command
  • offset: Number of seconds initial delay
  • command: The JavaScript code to be executed
  • name: The name of the task
  • period: number of seconds between the executions
\n\ncreates a new task with a generated id

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/tasks/ <<EOF\n{ \n  \"name\" : \"SampleTask\", \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"params\" : { \n    \"foo\" : \"bar\", \n    \"bar\" : \"foo\" \n  }, \n  \"period\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"836884935\", \n  \"name\" : \"SampleTask\", \n  \"type\" : \"periodic\", \n  \"period\" : 2, \n  \"created\" : 1443627622.01888, \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/836884935\n\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_new_tasks" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the post body is not accurate, a HTTP 400 is returned.
" + } + }, + "summary": " creates a task", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/tasks/": { + "get": { + "description": "\n\nfetches all existing tasks on the server

Example: Fetching all tasks

shell> curl --dump - http://localhost:8529/_api/tasks\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"id\" : \"16898503\", \n    \"name\" : \"user-defined task\", \n    \"type\" : \"periodic\", \n    \"period\" : 1, \n    \"created\" : 1443627553.436199, \n    \"command\" : \"(function () {\\n      require('org/arangodb/foxx/queues/manager').manage();\\n    })(params)\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-gc\", \n    \"name\" : \"statistics-gc\", \n    \"type\" : \"periodic\", \n    \"period\" : 450, \n    \"created\" : 1443627552.94918, \n    \"command\" : \"require('org/arangodb/statistics').garbageCollector();\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-average-collector\", \n    \"name\" : \"statistics-average-collector\", \n    \"type\" : \"periodic\", \n    \"period\" : 900, \n    \"created\" : 1443627552.946052, \n    \"command\" : \"require('org/arangodb/statistics').historianAverage();\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-collector\", \n    \"name\" : \"statistics-collector\", \n    \"type\" : \"periodic\", \n    \"period\" : 10, \n    \"created\" : 1443627552.945114, \n    \"command\" : \"require('org/arangodb/statistics').historian();\", \n    \"database\" : \"_system\" \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "The list of tasks
" + } + }, + "summary": " Fetch all tasks or one task", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/tasks/{id}": { + "delete": { + "description": "\n\nDeletes the task identified by id on the server.

Example: trying to delete non existing task

shell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/NoTaskWithThatName\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1852, \n  \"errorMessage\" : \"task not found\" \n}\n

\n
Example: Remove existing Task

shell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/SampleTask\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to delete.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "404": { + "description": "If the task id is unknown, then an HTTP 404 is returned.
" + } + }, + "summary": " deletes the task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "get": { + "description": "\n\nfetches one existing tasks on the server specified by id

Example: Fetching a single task by its id

shell> curl --dump - http://localhost:8529/_api/tasks/statistics-average-collector\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"statistics-average-collector\", \n  \"name\" : \"statistics-average-collector\", \n  \"type\" : \"periodic\", \n  \"period\" : 900, \n  \"created\" : 1443627552.946052, \n  \"command\" : \"require('org/arangodb/statistics').historianAverage();\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: trying to fetch a non-existing task

shell> curl --dump - http://localhost:8529/_api/tasks/non-existing-task\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1852, \n  \"errorMessage\" : \"task not found\" \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to fetch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The requested task
" + } + }, + "summary": " Fetch one task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • params: The parameters to be passed into command
  • offset: Number of seconds initial delay
  • command: The JavaScript code to be executed
  • name: The name of the task
  • period: number of seconds between the executions
\n\nregisters a new task with the specified id

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/tasks/sampleTask <<EOF\n{ \n  \"id\" : \"SampleTask\", \n  \"name\" : \"SampleTask\", \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"params\" : { \n    \"foo\" : \"bar\", \n    \"bar\" : \"foo\" \n  }, \n  \"period\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"sampleTask\", \n  \"name\" : \"SampleTask\", \n  \"type\" : \"periodic\", \n  \"period\" : 2, \n  \"created\" : 1443627622.623117, \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to create
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_new_tasks" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the task id already exists or the rest body is not accurate, HTTP 400 is returned.
" + } + }, + "summary": " creates a task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/transaction": { + "post": { + "description": "**A json post document with these Properties is required:**
  • action: the actual transaction operations to be executed, in the form of stringified JavaScript code. The code will be executed on server side, with late binding. It is thus critical that the code specified in action properly sets up all the variables it needs. If the code specified in action ends with a return statement, the value returned will also be returned by the REST API in the result attribute if the transaction committed successfully.
  • params: optional arguments passed to action.
  • collections: contains the array of collections to be used in the transaction (mandatory). collections must be a JSON object that can have the optional sub-attributes read and write. read and write must each be either arrays of collections names or strings with a single collection name.
  • lockTimeout: an optional numeric value that can be used to set a timeout for waiting on collection locks. If not specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
  • waitForSync: an optional boolean flag that, if set, will force the transaction to write all data to disk before returning.
\n\n
Contains the collections and action.
The transaction description must be passed in the body of the POST request.
If the transaction is fully executed and committed on the server, HTTP 200 will be returned. Additionally, the return value of the code defined in action will be returned in the result attribute.
For successfully committed transactions, the returned JSON object has the following properties:
  • error: boolean flag to indicate if an error occurred (false in this case)
  • code: the HTTP status code
  • result: the return value of the transaction
If the transaction specification is either missing or malformed, the server will respond with HTTP 400.
The body of the response will then contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message
If a transaction fails to commit, either by an exception thrown in the action code, or by an internal error, the server will respond with an error. Any other errors will be returned with any of the return codes HTTP 400, HTTP 409, or HTTP 500.

Example: Executing a transaction on a single collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : \"products\" \n  }, \n  \"action\" : \"function () { var db = require('internal').db; db.products.save({});  return db.products.count(); }\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Executing a transaction using multiple collections

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : [ \n      \"products\", \n      \"materials\" \n    ] \n  }, \n  \"action\" : \"function () {var db = require('internal').db;db.products.save({});db.materials.save({});return 'worked!';}\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : \"worked!\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Aborting a transaction due to an internal error

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : \"products\" \n  }, \n  \"action\" : \"function () {var db = require('internal').db;db.products.save({ _key: 'abc'});db.products.save({ _key: 'abc'});}\" \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"[ArangoError 1210: unique constraint violated]\", \n  \"stacktrace\" : [ \n    \"[ArangoError 1210: unique constraint violated]\", \n    \"  at Error (native)\", \n    \"  at eval (<anonymous>:1:99)\", \n    \"  at eval (<anonymous>:1:122)\", \n    \"  at post_api_transaction (js/actions/api-transaction.js:268:16)\", \n    \"  at Function.actions.defineHttp.callback (js/actions/api-transaction.js:288:11)\" \n  ], \n  \"message\" : \"unique constraint violated\", \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1210, \n  \"errorMessage\" : \"unique constraint violated\" \n}\n

\n
Example: Aborting a transaction by explicitly throwing an exception

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"read\" : \"products\" \n  }, \n  \"action\" : \"function () { throw 'doh!'; }\" \n}\nEOF\n\nHTTP/1.1 500 Internal Server Error\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"doh!\", \n  \"error\" : true, \n  \"code\" : 500, \n  \"errorNum\" : 500, \n  \"errorMessage\" : \"internal server error\" \n}\n

\n
Example: Referring to a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"read\" : \"products\" \n  }, \n  \"action\" : \"function () { return true; }\" \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"[ArangoError 1203: collection not found]\", \n  \"stacktrace\" : [ \n    \"[ArangoError 1203: collection not found]\", \n    \"  at Error (native)\", \n    \"  at post_api_transaction (js/actions/api-transaction.js:268:16)\", \n    \"  at Function.actions.defineHttp.callback (js/actions/api-transaction.js:288:11)\" \n  ], \n  \"message\" : \"collection not found\", \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1203, \n  \"errorMessage\" : \"collection not found\" \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_transaction" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the transaction is fully executed and committed on the server, HTTP 200 will be returned.
" + }, + "400": { + "description": "If the transaction specification is either missing or malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "If the transaction specification contains an unknown collection, the server will respond with HTTP 404.
" + }, + "500": { + "description": "Exceptions thrown by users will make the server respond with a return code of HTTP 500
" + } + }, + "summary": " Execute transaction", + "tags": [ + "Transactions" + ], + "x-examples": [], + "x-filename": "Transactions - js/actions/api-transaction.js" + } + }, + "/_api/traversal": { + "post": { + "description": "\n\nStarts a traversal starting from a given vertex and following. edges contained in a given edgeCollection. The request must contain the following attributes.
**A json post document with these Properties is required:**
  • sort: body (JavaScript) code of a custom comparison function for the edges. The signature of this function is (l, r) -> integer (where l and r are edges) and must return -1 if l is smaller than, +1 if l is greater than, and 0 if l and r are equal. The reason for this is the following: The order of edges returned for a certain vertex is undefined. This is because there is no natural order of edges for a vertex with multiple connected edges. To explicitly define the order in which edges on the vertex are followed, you can specify an edge comparator function with this attribute. Note that the value here has to be a string to conform to the JSON standard, which in turn is parsed as function body on the server side. Furthermore note that this attribute is only used for the standard expanders. If you use your custom expander you have to do the sorting yourself within the expander code.
  • direction: direction for traversal
    • if set, must be either \"outbound\", \"inbound\", or \"any\"
    • if not set, the expander attribute must be specified
  • minDepth: (ANDed with any existing filters): visits only nodes in at least the given depth
  • startVertex: id of the startVertex, e.g. \"users/foo\".
  • visitor: body (JavaScript) code of custom visitor function function signature: (config, result, vertex, path, connected) -> void The visitor function can do anything, but its return value is ignored. To populate a result, use the result variable by reference. Note that the connected argument is only populated when the order attribute is set to \"preorder-expander\".
  • itemOrder: item iteration order can be \"forward\" or \"backward\"
  • strategy: traversal strategy can be \"depthfirst\" or \"breadthfirst\"
  • filter: default is to include all nodes: body (JavaScript code) of custom filter function function signature: (config, vertex, path) -> mixed can return four different string values:
    • \"exclude\" -> this vertex will not be visited.
    • \"prune\" -> the edges of this vertex will not be followed.
    • \"\" or undefined -> visit the vertex and follow it's edges.
    • Array -> containing any combination of the above. If at least one \"exclude\" or \"prune\" respectively is contained, its effect will occur.
  • init: body (JavaScript) code of custom result initialization function function signature: (config, result) -> void initialize any values in result with what is required
  • maxIterations: Maximum number of iterations in each traversal. This number can be set to prevent endless loops in traversal of cyclic graphs. When a traversal performs as many iterations as the maxIterations value, the traversal will abort with an error. If maxIterations is not set, a server-defined value may be used.
  • maxDepth (ANDed with any existing filters): visits only nodes in at most the given depth
  • uniqueness: specifies uniqueness for vertices and edges visited if set, must be an object like this:
    \"uniqueness\": {\"vertices\": \"none\"|\"global\"|\"path\", \"edges\": \"none\"|\"global\"|\"path\"}
  • order: traversal order can be \"preorder\", \"postorder\" or \"preorder-expander\"
  • graphName: name of the graph that contains the edges. Either edgeCollection or graphName has to be given. In case both values are set the graphName is preferred.
  • expander: body (JavaScript) code of a custom expander function. Must be set if the direction attribute is not set. Function signature: (config, vertex, path) -> array. The expander must return an array of the connections for vertex; each connection is an object with the attributes edge and vertex.
  • edgeCollection: name of the collection that contains the edges.
\n\n
If the Traversal is successfully executed HTTP 200 will be returned. Additionally the result object will be returned by the traversal.
For successful traversals, the returned JSON object has the following properties:
  • error: boolean flag to indicate if an error occurred (false in this case)
  • code: the HTTP status code
  • result: the return value of the traversal
If the traversal specification is either missing or malformed, the server will respond with HTTP 400.
The body of the response will then contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example: In the following examples the underlying graph will contain five persons Alice, Bob, Charlie, Dave and Eve. We will have the following directed relations: - Alice knows Bob - Bob knows Charlie - Bob knows Dave - Eve knows Alice - Eve knows Bob
The starting vertex will always be Alice.
Follow only outbound edges


shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"890100167\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"890296775\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"890558919\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"890755527\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              
\"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/891410887\", \n              \"_key\" : \"891410887\", \n              \"_rev\" : \"891410887\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"890558919\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/891607495\", \n              \"_key\" : \"891607495\", \n              \"_rev\" : \"891607495\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n         
     \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"890755527\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow only inbound edges

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"inbound\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"871619015\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"872470983\", \n          \"name\" : \"Eve\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"871619015\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/873322951\", \n              \"_key\" : \"873322951\", \n              \"_rev\" : \"873322951\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"871619015\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"872470983\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow any direction of edges

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"global\" \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"841537991\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"842389959\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"841734599\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"841537991\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"841996743\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"842193351\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n         
   }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/842652103\", \n              \"_key\" : \"842652103\", \n              \"_rev\" : \"842652103\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/842848711\", \n              \"_key\" : \"842848711\", \n    
          \"_rev\" : \"842848711\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"841996743\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/843045319\", \n              \"_key\" : \"843045319\", \n              \"_rev\" : \"843045319\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : 
\"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"842193351\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Excluding Charlie and Bob

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"filter\" : \"if (vertex.name === \\\"Bob\\\" ||     vertex.name === \\\"Charlie\\\") {  return \\\"exclude\\\";}return;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"863427015\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"864082375\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"863427015\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/864541127\", \n              \"_key\" : \"864541127\", \n              \"_rev\" : \"864541127\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/864934343\", \n              \"_key\" : \"864934343\", \n              \"_rev\" : \"864934343\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"863427015\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"863623623\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"864082375\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Do not follow edges from Bob

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"filter\" : \"if (vertex.name === \\\"Bob\\\") {return \\\"prune\\\";}return;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"867686855\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"867883463\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"867686855\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/868800967\", \n              \"_key\" : \"868800967\", \n              \"_rev\" : \"868800967\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"867686855\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"867883463\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Visit only nodes in a depth of at least 2

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"minDepth\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"886299079\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"886495687\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/886954439\", \n              \"_key\" : \"886954439\", \n              \"_rev\" : \"886954439\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/887151047\", \n              \"_key\" : \"887151047\", \n              \"_rev\" : \"887151047\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"885840327\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"886036935\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"886299079\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            
{ \n              \"_id\" : \"knows/886954439\", \n              \"_key\" : \"886954439\", \n              \"_rev\" : \"886954439\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/887347655\", \n              \"_key\" : \"887347655\", \n              \"_rev\" : \"887347655\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"885840327\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"886036935\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"886495687\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Visit only nodes in a depth of at most 1

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"maxDepth\" : 1 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"875616711\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"875813319\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"875616711\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/876730823\", \n              \"_key\" : \"876730823\", \n              \"_rev\" : \"876730823\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"875616711\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"875813319\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a visitor function to return vertex ids only

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"visitor\" : \"result.visited.vertices.push(vertex._id);\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        \"persons/alice\", \n        \"persons/bob\", \n        \"persons/charlie\", \n        \"persons/dave\" \n      ], \n      \"paths\" : [ ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Count all visited nodes and return a list of nodes only

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"init\" : \"result.visited = 0; result.myVertices = [ ];\", \n  \"visitor\" : \"result.visited++; result.myVertices.push(vertex);\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : 4, \n    \"myVertices\" : [ \n      { \n        \"_id\" : \"persons/alice\", \n        \"_key\" : \"alice\", \n        \"_rev\" : \"900323783\", \n        \"name\" : \"Alice\" \n      }, \n      { \n        \"_id\" : \"persons/bob\", \n        \"_key\" : \"bob\", \n        \"_rev\" : \"900520391\", \n        \"name\" : \"Bob\" \n      }, \n      { \n        \"_id\" : \"persons/charlie\", \n        \"_key\" : \"charlie\", \n        \"_rev\" : \"900782535\", \n        \"name\" : \"Charlie\" \n      }, \n      { \n        \"_id\" : \"persons/dave\", \n        \"_key\" : \"dave\", \n        \"_rev\" : \"900979143\", \n        \"name\" : \"Dave\" \n      } \n    ] \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Expand only inbound edges of Alice and outbound edges of Eve

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"expander\" : \"var connections = [ ];if (vertex.name === \\\"Alice\\\") {config.datasource.getInEdges(vertex).forEach(function (e) {connections.push({ vertex: require(\\\"internal\\\").db._document(e._from), edge: e});});}if (vertex.name === \\\"Eve\\\") {config.datasource.getOutEdges(vertex).forEach(function (e) {connections.push({vertex: require(\\\"internal\\\").db._document(e._to), edge: e});});}return connections;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"904583623\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"905435591\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"904780231\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/906287559\", \n              \"_key\" : \"906287559\", \n              \"_rev\" : \"906287559\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : 
\"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"905435591\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/906287559\", \n              \"_key\" : \"906287559\", \n              \"_rev\" : \"906287559\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/906484167\", \n              \"_key\" : \"906484167\", \n              \"_rev\" : \"906484167\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"905435591\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"904780231\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow the depthfirst strategy

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"strategy\" : \"depthfirst\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"853334471\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"852679111\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"852941255\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"853137863\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"852679111\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"853334471\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          
\"_rev\" : \"852941255\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"853137863\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n     
         \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : 
\"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853793223\", \n              \"_key\" : \"853793223\", \n              \"_rev\" : \"853793223\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"852941255\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853989831\", \n              \"_key\" : \"853989831\", \n              \"_rev\" : \"853989831\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"853137863\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : 
\"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : 
\"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853793223\", \n              \"_key\" : \"853793223\", \n              \"_rev\" : \"853793223\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n         
     \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"852941255\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853989831\", \n              \"_key\" : \"853989831\", \n              \"_rev\" : \"853989831\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"853137863\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using postorder ordering

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"order\" : \"postorder\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"894818759\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"895015367\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"894556615\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"895211975\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"895211975\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"894818759\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"895015367\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          
\"_rev\" : \"894556615\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : 
\"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895670727\", \n              \"_key\" : \"895670727\", \n              \"_rev\" : \"895670727\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"894818759\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", 
\n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895867335\", \n              \"_key\" : \"895867335\", \n              \"_rev\" : \"895867335\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"895015367\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n            
  \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : 
\"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n        
      \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895670727\", \n              \"_key\" : \"895670727\", \n              \"_rev\" : \"895670727\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"894818759\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895867335\", \n              \"_key\" : \"895867335\", \n              \"_rev\" : \"895867335\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"895015367\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using backward item-ordering

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"itemOrder\" : \"backward\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"846715335\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"847174087\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"846977479\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"847370695\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"847370695\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"846715335\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"847174087\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          
\"_rev\" : \"846977479\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848026055\", \n              \"_key\" : \"848026055\", \n              \"_rev\" : \"848026055\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n 
             \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"847174087\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847829447\", \n              \"_key\" : \"847829447\", \n              \"_rev\" : \"847829447\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"846977479\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n       
     { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n    
          \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848026055\", \n              \"_key\" : \"848026055\", \n              \"_rev\" : \"848026055\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : 
\"847174087\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847829447\", \n              \"_key\" : \"847829447\", \n              \"_rev\" : \"847829447\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"846977479\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : 
\"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Edges should only be included once globally, but nodes are included every time they are visited

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"global\" \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"858446279\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"859298247\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"858642887\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"858446279\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"858905031\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"859101639\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n         
   }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859560391\", \n              \"_key\" : \"859560391\", \n              \"_rev\" : \"859560391\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859756999\", \n              \"_key\" : \"859756999\", \n    
          \"_rev\" : \"859756999\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"858905031\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859953607\", \n              \"_key\" : \"859953607\", \n              \"_rev\" : \"859953607\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : 
\"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"859101639\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: If the underlying graph is cyclic, maxIterations should be set
The underlying graph has two vertices Alice and Bob. With the directed edges:
  • Alice knows Bob, and Bob knows Alice


shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"none\" \n  }, \n  \"maxIterations\" : 5 \n}\nEOF\n\nHTTP/1.1 500 Internal Server Error\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 500, \n  \"errorNum\" : 1909, \n  \"errorMessage\" : \"too many iterations - try increasing the value of 'maxIterations'\" \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_HTTP_API_TRAVERSAL" + }, + "x-description-offset": 222 + } + ], + "responses": { + "200": { + "description": "If the traversal is fully executed HTTP 200 will be returned.
" + }, + "400": { + "description": "If the traversal specification is either missing or malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "The server will responded with HTTP 404 if the specified edge collection does not exist, or the specified start vertex cannot be found.
" + }, + "500": { + "description": "The server will responded with HTTP 500 when an error occurs inside the traversal or if a traversal performs more than maxIterations iterations.
" + } + }, + "summary": "executes a traversal", + "tags": [ + "Graph Traversal" + ], + "x-examples": [], + "x-filename": "Graph Traversal - js/actions/api-traversal.js" + } + }, + "/_api/user": { + "post": { + "description": "\n\n
The following data need to be passed in a JSON representation in the body of the POST request:
  • user: The name of the user as a string. This is mandatory.
  • passwd: The user password as a string. If no password is specified, the empty string will be used. If you pass the special value ARANGODB_DEFAULT_ROOT_PASSWORD, the password will be set to the value stored in the environment variable `ARANGODB_DEFAULT_ROOT_PASSWORD`. This can be used to pass an instance variable into ArangoDB. For example, the instance identifier from Amazon.
  • active: An optional flag that specifies whether the user is active. If not specified, this will default to true
  • extra: An optional JSON object with arbitrary extra data about the user
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, this will default to false. If set to true, the only operations allowed are PUT /_api/user or PATCH /_api/user. All other operations executed by the user will result in an HTTP 403.
If the user can be added by the server, the server will respond with HTTP 201. In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [], + "responses": { + "201": { + "description": "Returned if the user can be added by the server
" + }, + "400": { + "description": "If the JSON representation is malformed or mandatory data is missing from the request.

" + } + }, + "summary": " Create User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/user/": { + "get": { + "description": "\n\n
Fetches data about all users.
The call will return a JSON object with at least the following attributes on success:
  • user: The name of the user as a string.
  • active: An optional flag that specifies whether the user is active.
  • extra: An optional JSON object with arbitrary extra data about the user.
  • changePassword: An optional flag that specifies whether the user must change the password or not.
", + "parameters": [], + "responses": { + "200": { + "description": "The users that were found

" + } + }, + "summary": " List available Users", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/user/{user}": { + "delete": { + "description": "\n\n
Removes an existing user, identified by user.
If the user can be removed, the server will respond with HTTP 202. In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Is returned if the user was removed by the server
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Remove User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "get": { + "description": "\n\n
Fetches data about the specified user.
The call will return a JSON object with at least the following attributes on success:
  • user: The name of the user as a string.
  • active: An optional flag that specifies whether the user is active.
  • extra: An optional JSON object with arbitrary extra data about the user.
  • changePassword: An optional flag that specifies whether the user must change the password or not.
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The user was found
" + }, + "404": { + "description": "The user with the specified name does not exist

" + } + }, + "summary": " Fetch User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "patch": { + "description": "\n\n
Partially updates the data of an existing user. The name of an existing user must be specified in user.
The following data can be passed in a JSON representation in the body of the PATCH request:
  • passwd: The user password as a string. Specifying a password is optional. If not specified, the previously existing value will not be modified.
  • active: An optional flag that specifies whether the user is active. If not specified, the previously existing value will not be modified.
  • extra: An optional JSON object with arbitrary extra data about the user. If not specified, the previously existing value will not be modified.
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, the previously existing value will not be modified.
If the user can be updated by the server, the server will respond with HTTP 200.
In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server
" + }, + "400": { + "description": "The JSON representation is malformed or mandatory data is missing from the request
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Update User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "put": { + "description": "\n\n
Replaces the data of an existing user. The name of an existing user must be specified in user.
The following data can be passed in a JSON representation in the body of the PUT request:
  • passwd: The user password as a string. Specifying a password is mandatory, but the empty string is allowed for passwords
  • active: An optional flag that specifies whether the user is active. If not specified, this will default to true
  • extra: An optional JSON object with arbitrary extra data about the user
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, this will default to false
If the user can be replaced by the server, the server will respond with HTTP 200.
In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server
" + }, + "400": { + "description": "The JSON representation is malformed or mandatory data is missing from the request
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Replace User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/version": { + "get": { + "description": "\n\nReturns the server name and version number. The response is a JSON object with the following attributes:
**A json document with these Properties is returned:**
  • version: the server version string. The string has the format \"major.*minor.*sub\". major and minor will be numeric, and sub may contain a number or a textual version.
  • details: an optional JSON object with additional details. This is returned only if the details URL parameter is set to true in the request.
  • server: will always contain arango

Example: Return the version information

shell> curl --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Return the version information with details

shell> curl --dump - http://localhost:8529/_api/version?details=true\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\", \n  \"details\" : { \n    \"architecture\" : \"64bit\", \n    \"build-date\" : \"2015-09-25 11:17:39\", \n    \"configure\" : \"'./configure' '--enable-relative' '--enable-maintainer-mode' '--with-backtrace' '--enable-v8-debug' 'CXXFLAGS=-O0 -ggdb -DDEBUG_CLUSTER_COMM' 'CFLAGS=-O0 -ggdb  -DDEBUG_CLUSTER_COMM'\", \n    \"env\" : \"CFLAGS='-O0 -ggdb  -DDEBUG_CLUSTER_COMM' CXXFLAGS='-O0 -ggdb -DDEBUG_CLUSTER_COMM'\", \n    \"fd-client-event-handler\" : \"poll\", \n    \"fd-setsize\" : \"1024\", \n    \"icu-version\" : \"54.1\", \n    \"libev-version\" : \"4.11\", \n    \"maintainer-mode\" : \"true\", \n    \"openssl-version\" : \"OpenSSL 1.0.2 22 Jan 2015\", \n    \"readline-version\" : \"6.3\", \n    \"repository-version\" : \"heads/devel-0-g43dd92bb4716d73c7128478b4a7cdb36fd200421\", \n    \"server-version\" : \"2.7.0-devel\", \n    \"sizeof int\" : \"4\", \n    \"sizeof void*\" : \"8\", \n    \"tcmalloc\" : \"false\", \n    \"v8-version\" : \"4.3.61\", \n    \"mode\" : \"standalone\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "If set to true, the response will contain a details attribute with additional information about included components and their versions. The attribute names and internals of the details object may vary depending on platform and ArangoDB version.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "is returned in all cases.
", + "schema": { + "$ref": "#/definitions/JSF_get_api_return_rc_200" + }, + "x-description-offset": 165 + } + }, + "summary": " Return server version", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + } + }, + "schemes": [ + "http" + ], + "swagger": "2.0" +} diff --git a/test-perf/src/main/resources/multi-docs.json b/test-perf/src/main/resources/multi-docs.json new file mode 100644 index 000000000..564038320 --- /dev/null +++ b/test-perf/src/main/resources/multi-docs.json @@ -0,0 +1,719 @@ +[ + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + } +] \ No newline at end of file diff --git a/test-resilience/README.md b/test-resilience/README.md new file mode 100644 index 000000000..b96337d2c --- /dev/null +++ b/test-resilience/README.md @@ -0,0 +1,15 @@ +# arangodb-java-driver-resilience-tests + +## run + +Start (single server) ArangoDB: +```shell +./docker/start_db.sh +``` + +Start [toxiproxy-server](https://github.com/Shopify/toxiproxy) at `127.0.0.1:8474`. + +Run the tests: +```shell + mvn test -am -pl test-resilience +``` diff --git a/test-resilience/bin/startProxy.sh b/test-resilience/bin/startProxy.sh new file mode 100755 index 000000000..38515448d --- /dev/null +++ b/test-resilience/bin/startProxy.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +wget -O bin/toxiproxy-server-linux-amd64 https://github.com/Shopify/toxiproxy/releases/download/${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 +chmod a+x bin/toxiproxy-server-linux-amd64 +./bin/toxiproxy-server-linux-amd64 diff --git a/test-resilience/pom.xml b/test-resilience/pom.xml new file mode 100644 index 000000000..c97fec0a0 --- /dev/null +++ b/test-resilience/pom.xml @@ -0,0 +1,48 @@ + + + + ../test-parent + com.arangodb + test-parent + 7.22.0 + + 4.0.0 + + test-resilience + + + + org.mock-server + mockserver-netty + 5.15.0 + test + + + eu.rekawek.toxiproxy + toxiproxy-java + 2.1.7 + test + + + ch.qos.logback + logback-classic + 1.4.12 + test + + + + + + + io.netty + netty-bom + 4.1.93.Final + pom + import + + + + + \ No newline at end of file diff --git a/test-resilience/src/test/java/resilience/ClusterTest.java b/test-resilience/src/test/java/resilience/ClusterTest.java new file mode 100644 index 000000000..d15bbbcf6 --- /dev/null +++ b/test-resilience/src/test/java/resilience/ClusterTest.java @@ -0,0 +1,184 @@ +package resilience; + +import 
ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.Protocol; +import com.arangodb.Request; +import com.fasterxml.jackson.databind.node.ObjectNode; +import eu.rekawek.toxiproxy.Proxy; +import eu.rekawek.toxiproxy.ToxiproxyClient; +import org.junit.jupiter.api.*; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +@Tag("cluster") +public abstract class ClusterTest extends TestUtils { + + private static final List endpoints = Arrays.asList( + new Endpoint("cluster1", HOST, 18529, UPSTREAM_GW + ":8529"), + new Endpoint("cluster2", HOST, 18539, UPSTREAM_GW + ":8539"), + new Endpoint("cluster3", HOST, 18549, UPSTREAM_GW + ":8549") + ); + + @BeforeAll + static void beforeAll() throws IOException { + ToxiproxyClient client = new ToxiproxyClient(HOST, 8474); + for (Endpoint endpoint : endpoints) { + Proxy p = client.getProxyOrNull(endpoint.getName()); + if (p != null) { + p.delete(); + } + endpoint.setProxy(client.createProxy(endpoint.getName(), endpoint.getHost() + ":" + endpoint.getPort(), endpoint.getUpstream())); + initServerId(endpoint); + } + } + + @AfterAll + static void afterAll() throws IOException { + for (Endpoint endpoint : endpoints) { + endpoint.getProxy().delete(); + } + } + + public ClusterTest() { + } + + public ClusterTest(Map, Level> logLevels) { + super(logLevels); + } + + @BeforeEach + void beforeEach() { + enableAllEndpoints(); + logs.reset(); + } + + protected static List getEndpoints() { + return endpoints; + } + + protected static ArangoDB.Builder dbBuilder() { + ArangoDB.Builder builder = new ArangoDB.Builder(); + for (Endpoint endpoint : getEndpoints()) { + builder.host(endpoint.getHost(), endpoint.getPort()); + } + return builder.password(PASSWORD); + } + + protected static Stream protocolProvider() { + return Stream.of(Protocol.values()) + 
.filter(p -> !p.equals(Protocol.VST) || isLessThanVersion(3, 12)); + } + + protected static Stream builderProvider() { + return protocolProvider().map(p -> dbBuilder().protocol(p)); + } + + protected static Stream adbProvider() { + return builderProvider().map(ArangoDB.Builder::build); + } + + protected static Stream asyncAdbProvider() { + return adbProvider().map(ArangoDB::async); + } + + protected static String serverIdGET(ArangoDB adb) { + return adb.execute(Request.builder() + .method(Request.Method.GET) + .path("/_admin/status") + .build(), ObjectNode.class) + .getBody() + .get("serverInfo") + .get("serverId") + .textValue(); + } + + protected static String serverIdGET(ArangoDBAsync adb) { + try { + return adb.execute(Request.builder() + .method(Request.Method.GET) + .path("/_admin/status") + .build(), ObjectNode.class) + .get() + .getBody() + .get("serverInfo") + .get("serverId") + .textValue(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + protected static String serverIdPOST(ArangoDB adb) { + return adb.execute(Request.builder() + .method(Request.Method.POST) + .path("/_admin/status") + .build(), ObjectNode.class) + .getBody() + .get("serverInfo") + .get("serverId") + .textValue(); + } + + protected static String serverIdPOST(ArangoDBAsync adb) { + try { + return adb.execute(Request.builder() + .method(Request.Method.POST) + .path("/_admin/status") + .build(), ObjectNode.class) + .get() + .getBody() + .get("serverInfo") + .get("serverId") + .textValue(); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new RuntimeException(e); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + private static void initServerId(Endpoint endpoint) { + ArangoDB adb = new ArangoDB.Builder() + .host(endpoint.getHost(), endpoint.getPort()) + .password(PASSWORD) + .build(); + 
String serverId = serverIdGET(adb); + endpoint.setServerId(serverId); + adb.shutdown(); + } + + protected void enableAllEndpoints() { + try { + for (Endpoint endpoint : endpoints) { + endpoint.getProxy().enable(); + } + Thread.sleep(100); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } + } + + protected void disableAllEndpoints() { + try { + for (Endpoint endpoint : endpoints) { + endpoint.getProxy().disable(); + } + Thread.sleep(100); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } + } + +} diff --git a/test-resilience/src/test/java/resilience/Endpoint.java b/test-resilience/src/test/java/resilience/Endpoint.java new file mode 100644 index 000000000..d602ec7b4 --- /dev/null +++ b/test-resilience/src/test/java/resilience/Endpoint.java @@ -0,0 +1,84 @@ +package resilience; + +import eu.rekawek.toxiproxy.Proxy; + +import java.io.IOException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * class representing a proxied db endpoint + */ +public class Endpoint { + private final String name; + private final String host; + private final int port; + private final String upstream; + private Proxy proxy; + private String serverId; + + public Endpoint(String name, String host, int port, String upstream) { + this.name = name; + this.host = host; + this.port = port; + this.upstream = upstream; + } + + public String getName() { + return name; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + public String getUpstream() { + return upstream; + } + + public Proxy getProxy() { + return proxy; + } + + public void setProxy(Proxy proxy) { + this.proxy = proxy; + } + + public String getServerId() { + return serverId; + } + + public void setServerId(String serverId) { + this.serverId = serverId; + } + + public void enable() { + try { + getProxy().enable(); + 
Thread.sleep(100); + } catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + } + + public void disableNow() { + try { + getProxy().disable(); + Thread.sleep(100); + } catch (IOException | InterruptedException e) { + e.printStackTrace(); + throw new RuntimeException(e); + } + } + + public void disable(long delay) { + ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor(); + es.schedule(this::disableNow, delay, TimeUnit.MILLISECONDS); + es.shutdown(); + } +} diff --git a/test-resilience/src/test/java/resilience/MockTest.java b/test-resilience/src/test/java/resilience/MockTest.java new file mode 100644 index 000000000..c75ecc27a --- /dev/null +++ b/test-resilience/src/test/java/resilience/MockTest.java @@ -0,0 +1,40 @@ +package resilience; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.internal.net.Communication; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.mockserver.integration.ClientAndServer; + +import java.util.Collections; + +import static org.mockserver.integration.ClientAndServer.startClientAndServer; + +public class MockTest extends SingleServerTest { + + protected ClientAndServer mockServer; + protected ArangoDB arangoDB; + + public MockTest() { + super(Collections.singletonMap(Communication.class, Level.DEBUG)); + } + + @BeforeEach + void before() { + mockServer = startClientAndServer(getEndpoint().getHost(), getEndpoint().getPort()); + arangoDB = new ArangoDB.Builder() + .protocol(Protocol.HTTP_JSON) + .password(PASSWORD) + .host("127.0.0.1", mockServer.getPort()) + .build(); + } + + @AfterEach + void after() { + arangoDB.shutdown(); + mockServer.stop(); + } + +} diff --git a/test-resilience/src/test/java/resilience/SingleServerTest.java b/test-resilience/src/test/java/resilience/SingleServerTest.java new file mode 100644 index 000000000..852d0a013 --- /dev/null +++ 
b/test-resilience/src/test/java/resilience/SingleServerTest.java @@ -0,0 +1,78 @@ +package resilience; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.Protocol; +import eu.rekawek.toxiproxy.Proxy; +import eu.rekawek.toxiproxy.ToxiproxyClient; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Stream; + +@Tag("singleServer") +public abstract class SingleServerTest extends TestUtils { + + private static final Endpoint endpoint = new Endpoint("singleServer", HOST, 18529, UPSTREAM_GW + ":8529"); + + public SingleServerTest() { + } + + public SingleServerTest(Map, Level> logLevels) { + super(logLevels); + } + + @BeforeAll + static void beforeAll() throws IOException { + ToxiproxyClient client = new ToxiproxyClient(HOST, 8474); + Proxy p = client.getProxyOrNull(endpoint.getName()); + if (p != null) { + p.delete(); + } + endpoint.setProxy(client.createProxy(endpoint.getName(), HOST + ":" + endpoint.getPort(), endpoint.getUpstream())); + } + + @AfterAll + static void afterAll() throws IOException { + endpoint.getProxy().delete(); + } + + @BeforeEach + void beforeEach() { + getEndpoint().enable(); + logs.reset(); + } + + protected static Endpoint getEndpoint() { + return endpoint; + } + + protected static ArangoDB.Builder dbBuilder() { + return new ArangoDB.Builder() + .host(endpoint.getHost(), endpoint.getPort()) + .password(PASSWORD); + } + + protected static Stream protocolProvider() { + return Stream.of(Protocol.values()) + .filter(p -> !p.equals(Protocol.VST) || isLessThanVersion(3, 12)); + } + + protected static Stream builderProvider() { + return protocolProvider().map(p -> dbBuilder().protocol(p)); + } + + protected static Stream adbProvider() { + return builderProvider().map(ArangoDB.Builder::build); + } 
+ + protected static Stream asyncAdbProvider() { + return adbProvider().map(ArangoDB::async); + } + +} diff --git a/test-resilience/src/test/java/resilience/TestUtils.java b/test-resilience/src/test/java/resilience/TestUtils.java new file mode 100644 index 000000000..9a827ac4f --- /dev/null +++ b/test-resilience/src/test/java/resilience/TestUtils.java @@ -0,0 +1,133 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package resilience; + + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import com.arangodb.ArangoDB; +import com.arangodb.entity.ArangoDBVersion; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.slf4j.LoggerFactory; +import resilience.utils.MemoryAppender; + +import java.util.HashMap; +import java.util.Map; + +/** + * @author Michele Rastelli + */ +public abstract class TestUtils { + + protected static final String HOST = "127.0.0.1"; + protected static final String UPSTREAM_GW = "172.28.0.1"; + protected static final String PASSWORD = "test"; + protected static final MemoryAppender logs = new MemoryAppender(); + private static final ArangoDBVersion version = new ArangoDB.Builder() + .host(UPSTREAM_GW, 8529) + .password(PASSWORD) + .build() + .getVersion(); + + public TestUtils() { + } + + public TestUtils(Map, Level> logLevels) { + this.logLevels.putAll(logLevels); + } + + protected static boolean isAtLeastVersion(final int major, final int minor) { + return isAtLeastVersion(major, minor, 0); + } + + protected static boolean isAtLeastVersion(final int major, final int minor, final int patch) { + return isAtLeastVersion(version.getVersion(), major, minor, patch); + } + + protected static boolean isLessThanVersion(final int major, final int minor) { + return isLessThanVersion(major, minor, 0); + } + + protected static boolean isLessThanVersion(final int major, final int minor, final int patch) { + return isLessThanVersion(version.getVersion(), major, minor, patch); + } + + /** + * Parses {@param version} and checks whether it is greater or equal to <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. 
+ */ + private static boolean isAtLeastVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) >= 0; + } + + /** + * Parses {@param version} and checks whether it is less than <{@param otherMajor}, {@param otherMinor}, + * {@param otherPatch}> comparing the corresponding version components in lexicographical order. + */ + private static boolean isLessThanVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + return compareVersion(version, otherMajor, otherMinor, otherPatch) < 0; + } + + private static int compareVersion(final String version, final int otherMajor, final int otherMinor, + final int otherPatch) { + String[] parts = version.split("-")[0].split("\\."); + + int major = Integer.parseInt(parts[0]); + int minor = Integer.parseInt(parts[1]); + int patch = Integer.parseInt(parts[2]); + + int majorComparison = Integer.compare(major, otherMajor); + if (majorComparison != 0) { + return majorComparison; + } + + int minorComparison = Integer.compare(minor, otherMinor); + if (minorComparison != 0) { + return minorComparison; + } + + return Integer.compare(patch, otherPatch); + } + + private final Map, Level> logLevels = new HashMap<>(); + private final Map, Level> originalLogLevels = new HashMap<>(); + + @BeforeEach + void setLogLevels() { + logLevels.forEach((clazz, level) -> { + Logger logger = (Logger) LoggerFactory.getLogger(clazz); + originalLogLevels.put(clazz, logger.getLevel()); + logger.setLevel(level); + }); + } + + @AfterEach + void resetLogLevels() { + originalLogLevels.forEach((clazz, level) -> { + Logger logger = (Logger) LoggerFactory.getLogger(clazz); + logger.setLevel(level); + }); + } + +} diff --git a/test-resilience/src/test/java/resilience/compression/CompressionTest.java b/test-resilience/src/test/java/resilience/compression/CompressionTest.java new file mode 100644 index 
000000000..d33e345d0 --- /dev/null +++ b/test-resilience/src/test/java/resilience/compression/CompressionTest.java @@ -0,0 +1,80 @@ +package resilience.compression; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.arangodb.ArangoDB; +import com.arangodb.Compression; +import com.arangodb.Protocol; +import io.netty.handler.codec.http2.Http2FrameLogger; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class CompressionTest extends ClusterTest { + + CompressionTest() { + super(Collections.singletonMap(Http2FrameLogger.class, Level.DEBUG)); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void gzip(Protocol protocol) { + doTest(protocol, Compression.GZIP); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void deflate(Protocol protocol) { + doTest(protocol, Compression.DEFLATE); + } + + void doTest(Protocol protocol, Compression compression) { + assumeTrue(isAtLeastVersion(3, 12)); + assumeTrue(protocol != Protocol.VST); + + assumeTrue(protocol != Protocol.HTTP_VPACK, "hex dumps logs"); // FIXME + assumeTrue(protocol != Protocol.HTTP_JSON, "hex dumps logs"); // FIXME + + // FIXME: + // When using HTTP_VPACK or HTTP_JSON, the logs are hex dumps. + // Implement a way to check the content-encoding and accept-encoding headers from these logs. 
+ + ArangoDB adb = dbBuilder() + .protocol(protocol) + .compression(compression) + .compressionThreshold(0) + .build(); + + List data = IntStream.range(0, 500) + .mapToObj(i -> UUID.randomUUID().toString()) + .collect(Collectors.toList()); + + adb.db().query("FOR i IN @data RETURN i", String.class, + Collections.singletonMap("data", data)).asListRemaining(); + + adb.shutdown(); + + String compressionLC = compression.toString().toLowerCase(Locale.ROOT); + + // request + assertThat(logs.getLogs()) + .map(ILoggingEvent::getFormattedMessage) + .anyMatch(l -> l.contains("content-encoding: " + compressionLC) && l.contains("accept-encoding: " + compressionLC)); + + // response + assertThat(logs.getLogs()) + .map(ILoggingEvent::getFormattedMessage) + .anyMatch(l -> l.contains("content-encoding: " + compressionLC) && l.contains("server: ArangoDB")); + } + +} diff --git a/test-resilience/src/test/java/resilience/connection/AcquireHostListTest.java b/test-resilience/src/test/java/resilience/connection/AcquireHostListTest.java new file mode 100644 index 000000000..be8d02d5f --- /dev/null +++ b/test-resilience/src/test/java/resilience/connection/AcquireHostListTest.java @@ -0,0 +1,58 @@ +package resilience.connection; + +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.entity.LoadBalancingStrategy; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; +import resilience.Endpoint; + +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class AcquireHostListTest extends ClusterTest { + + @ParameterizedTest(name = "{index}") + @MethodSource("protocolProvider") + void acquireHostList(Protocol protocol) { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + 
.acquireHostList(true) + .protocol(protocol) + .loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN) + .build(); + + Set serverIds = getEndpoints().stream() + .map(Endpoint::getServerId) + .collect(Collectors.toSet()); + Set retrievedIds = new HashSet<>(); + + for (int i = 0; i < serverIds.size(); i++) { + retrievedIds.add(serverIdGET(adb)); + } + + assertThat(retrievedIds).containsExactlyInAnyOrderElementsOf(serverIds); + } + + @ParameterizedTest(name = "{index}") + @EnumSource(LoadBalancingStrategy.class) + void acquireHostListWithLoadBalancingStrategy(LoadBalancingStrategy lb) { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .acquireHostList(true) + .loadBalancingStrategy(lb) + .build(); + + adb.getVersion(); + adb.getVersion(); + adb.getVersion(); + } + +} diff --git a/test-resilience/src/test/java/resilience/connection/ConnectionClusterTest.java b/test-resilience/src/test/java/resilience/connection/ConnectionClusterTest.java new file mode 100644 index 000000000..3748ea975 --- /dev/null +++ b/test-resilience/src/test/java/resilience/connection/ConnectionClusterTest.java @@ -0,0 +1,207 @@ +package resilience.connection; + +import ch.qos.logback.classic.Level; +import com.arangodb.*; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.net.ConnectException; +import java.net.UnknownHostException; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Michele Rastelli + */ +class ConnectionClusterTest extends ClusterTest { + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFail(Protocol protocol) { + // FIXME: make this test faster and re-enable + ArangoDB arangoDB = new ArangoDB.Builder() + 
.host("wrongHost", 8529) + .protocol(protocol) + .build(); + + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host!"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> { + assertThat(e).isInstanceOf(UnknownHostException.class); + assertThat(e.getMessage()).contains("wrongHost"); + }); + arangoDB.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFailAsync(Protocol protocol) { + // FIXME: make this test faster and re-enable + ArangoDBAsync arangoDB = new ArangoDB.Builder() + .host("wrongHost", 8529) + .protocol(protocol) + .build() + .async(); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host!"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> { + assertThat(e).isInstanceOf(UnknownHostException.class); + assertThat(e.getMessage()).contains("wrongHost"); + }); + arangoDB.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFailover(Protocol protocol) { + // FIXME: make this test faster and re-enable + ArangoDB arangoDB = new ArangoDB.Builder() + .password("test") + .host("wrongHost", 8529) + .host("127.0.0.1", 8529) + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + 
} + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFailoverAsync(Protocol protocol) throws ExecutionException, InterruptedException { + // FIXME: make this test faster and re-enable + ArangoDBAsync arangoDB = new ArangoDB.Builder() + .password("test") + .host("wrongHost", 8529) + .host("127.0.0.1", 8529) + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void connectionFail(ArangoDB arangoDB) { + disableAllEndpoints(); + + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + + arangoDB.shutdown(); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void connectionFailAsync(ArangoDBAsync arangoDB) { + disableAllEndpoints(); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + arangoDB.shutdown(); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + 
@MethodSource("adbProvider") + void connectionFailover(ArangoDB arangoDB) { + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.getVersion(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void connectionFailoverAsync(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.getVersion().get(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void connectionFailoverPost(ArangoDB arangoDB) { + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.db().query("RETURN 1", Integer.class); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void connectionFailoverPostAsync(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.db().query("RETURN 1", Integer.class).get(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + arangoDB.shutdown(); + enableAllEndpoints(); + } + +} diff --git a/test-resilience/src/test/java/resilience/connection/ConnectionTest.java 
b/test-resilience/src/test/java/resilience/connection/ConnectionTest.java new file mode 100644 index 000000000..b64f1dd42 --- /dev/null +++ b/test-resilience/src/test/java/resilience/connection/ConnectionTest.java @@ -0,0 +1,194 @@ +package resilience.connection; + +import com.arangodb.*; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.ResetPeer; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.UnknownHostException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class ConnectionTest extends SingleServerTest { + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFail(Protocol protocol) { + // FIXME: make this test faster and re-enable + ArangoDB arangoDB = new ArangoDB.Builder() + .host("wrongHost", 8529) + .protocol(protocol) + .build(); + + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host!"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> { + assertThat(e).isInstanceOf(UnknownHostException.class); + assertThat(e.getMessage()).contains("wrongHost"); + }); + arangoDB.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + @Disabled + void nameResolutionFailAsync(Protocol protocol) { + // FIXME: make this test faster and re-enable + ArangoDBAsync arangoDB = new ArangoDB.Builder() + 
.host("wrongHost", 8529) + .protocol(protocol) + .build() + .async(); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host!"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> { + assertThat(e).isInstanceOf(UnknownHostException.class); + assertThat(e.getMessage()).contains("wrongHost"); + }); + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void connectionFail(ArangoDB arangoDB) { + getEndpoint().disableNow(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + arangoDB.shutdown(); + getEndpoint().enable(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void connectionFailAsync(ArangoDBAsync arangoDB) { + getEndpoint().disableNow(); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + arangoDB.shutdown(); + getEndpoint().enable(); + } + + @ParameterizedTest(name = 
"{index}") + @MethodSource("protocolProvider") + void authFail(Protocol protocol) { + ArangoDB adb = new ArangoDB.Builder() + .host(getEndpoint().getHost(), getEndpoint().getPort()) + .protocol(protocol) + .password("wrong") + .build(); + + Throwable thrown = catchThrowable(adb::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException aEx = (ArangoDBException) thrown; + assertThat(aEx.getResponseCode()).isEqualTo(401); + adb.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("protocolProvider") + void authFailAsync(Protocol protocol) { + ArangoDBAsync adb = new ArangoDB.Builder() + .host(getEndpoint().getHost(), getEndpoint().getPort()) + .protocol(protocol) + .password("wrong") + .build() + .async(); + + Throwable thrown = catchThrowable(() -> adb.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + ArangoDBException aEx = (ArangoDBException) thrown; + assertThat(aEx.getResponseCode()).isEqualTo(401); + adb.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void connClose(ArangoDB adb) { + getEndpoint().disable(500); + Throwable thrown = catchThrowable(() -> adb.db().query("RETURN SLEEP(1)", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + adb.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void connCloseAsync(ArangoDBAsync adb) { + getEndpoint().disable(500); + Throwable thrown = catchThrowable(() -> adb.db().query("RETURN SLEEP(1)", Void.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + adb.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("protocolProvider") + void connReset(Protocol protocol) throws IOException, InterruptedException { + 
assumeTrue(!protocol.equals(Protocol.VST), "DE-776"); // FIXME + ArangoDB adb = new ArangoDB.Builder() + .host(getEndpoint().getHost(), getEndpoint().getPort()) + .protocol(protocol) + .password("test") + .build(); + + ResetPeer toxic = getEndpoint().getProxy().toxics().resetPeer("reset", ToxicDirection.DOWNSTREAM, 500); + Thread.sleep(100); + + Throwable thrown = catchThrowable(() -> adb.db().query("RETURN SLEEP(1)", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + adb.shutdown(); + toxic.remove(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("protocolProvider") + void connResetAsync(Protocol protocol) throws IOException, InterruptedException { + assumeTrue(!protocol.equals(Protocol.VST), "DE-776"); // FIXME + ArangoDBAsync adb = new ArangoDB.Builder() + .host(getEndpoint().getHost(), getEndpoint().getPort()) + .protocol(protocol) + .password("test") + .build() + .async(); + + ResetPeer toxic = getEndpoint().getProxy().toxics().resetPeer("reset", ToxicDirection.DOWNSTREAM, 500); + Thread.sleep(100); + + Throwable thrown = catchThrowable(() -> adb.db().query("RETURN SLEEP(1)", Void.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + adb.shutdown(); + toxic.remove(); + } + +} diff --git a/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceNoneClusterTest.java b/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceNoneClusterTest.java new file mode 100644 index 000000000..d225ed328 --- /dev/null +++ b/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceNoneClusterTest.java @@ -0,0 +1,157 @@ +package resilience.loadbalance; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.entity.LoadBalancingStrategy; +import 
eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; +import resilience.Endpoint; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +public class LoadBalanceNoneClusterTest extends ClusterTest { + + static Stream arangoProvider() { + return builderProvider().map(it->it.loadBalancingStrategy(LoadBalancingStrategy.NONE).build()); + } + + static Stream asyncArangoProvider() { + return arangoProvider().map(ArangoDB::async); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void loadBalancing(ArangoDB arangoDB) { + List endpoints = getEndpoints(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void loadBalancingAsync(ArangoDBAsync arangoDB) { + List endpoints = getEndpoints(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void failover(ArangoDB arangoDB) { + List endpoints = getEndpoints(); + + endpoints.get(0).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + enableAllEndpoints(); + + endpoints.get(1).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + enableAllEndpoints(); + + 
endpoints.get(2).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + enableAllEndpoints(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void failoverAsync(ArangoDBAsync arangoDB) { + List endpoints = getEndpoints(); + + endpoints.get(0).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + enableAllEndpoints(); + + endpoints.get(1).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + enableAllEndpoints(); + + endpoints.get(2).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + enableAllEndpoints(); + } + + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryGET(ArangoDB arangoDB) throws IOException, InterruptedException { + List endpoints = getEndpoints(); + + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + } + + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryPOST(ArangoDB arangoDB) throws IOException, InterruptedException { + List endpoints = getEndpoints(); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); 
+ + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> serverIdPOST(arangoDB)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + } + + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void retryPOSTAsync(ArangoDBAsync arangoDB) throws IOException, InterruptedException { + List endpoints = getEndpoints(); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> serverIdPOST(arangoDB)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdPOST(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + } + +} diff --git a/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceRoundRobinClusterTest.java b/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceRoundRobinClusterTest.java new file mode 100644 index 000000000..1efb0305d --- /dev/null +++ b/test-resilience/src/test/java/resilience/loadbalance/LoadBalanceRoundRobinClusterTest.java @@ -0,0 +1,164 @@ +package resilience.loadbalance; + +import com.arangodb.ArangoDB; +import 
com.arangodb.ArangoDBAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.entity.LoadBalancingStrategy; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; +import resilience.Endpoint; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +public class LoadBalanceRoundRobinClusterTest extends ClusterTest { + + static Stream arangoProvider() { + return builderProvider().map(it -> it.loadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN).build()); + } + + static Stream asyncArangoProvider() { + return arangoProvider().map(ArangoDB::async); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void loadBalancing(ArangoDB arangoDB) { + List endpoints = getEndpoints(); + for (Endpoint endpoint : endpoints) { + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoint.getServerId()); + } + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void loadBalancingAsync(ArangoDBAsync arangoDB) { + List endpoints = getEndpoints(); + for (Endpoint endpoint : endpoints) { + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoint.getServerId()); + } + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void failover(ArangoDB arangoDB) { + List endpoints = getEndpoints(); + endpoints.get(0).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + endpoints.get(0).enable(); + 
assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void failoverAsync(ArangoDBAsync arangoDB) { + List endpoints = getEndpoints(); + endpoints.get(0).disableNow(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + endpoints.get(0).enable(); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryGET(ArangoDB arangoDB) throws IOException, InterruptedException { + List endpoints = getEndpoints(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void retryGETAsync(ArangoDBAsync arangoDB) throws IOException, InterruptedException { + List endpoints = getEndpoints(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(1).getServerId()); + assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(2).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + + 
assertThat(serverIdGET(arangoDB)).isEqualTo(endpoints.get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryPOST(ArangoDB arangoDB) throws IOException, InterruptedException { + // create VST connections + for (int i = 0; i < getEndpoints().size(); i++) { + arangoDB.getVersion(); + } + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> serverIdPOST(arangoDB)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(1).getServerId()); + assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(2).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(0).getServerId()); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void retryPOSTAsync(ArangoDBAsync arangoDB) throws IOException, InterruptedException, ExecutionException { + // create VST connections + for (int i = 0; i < getEndpoints().size(); i++) { + arangoDB.getVersion().get(); + } + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> serverIdPOST(arangoDB)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(1).getServerId()); + assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(2).getServerId()); + + toxic.remove(); + enableAllEndpoints(); + + 
assertThat(serverIdPOST(arangoDB)).isEqualTo(getEndpoints().get(0).getServerId()); + } + +} diff --git a/test-resilience/src/test/java/resilience/logging/RequestLoggingTest.java b/test-resilience/src/test/java/resilience/logging/RequestLoggingTest.java new file mode 100644 index 000000000..ce5d33386 --- /dev/null +++ b/test-resilience/src/test/java/resilience/logging/RequestLoggingTest.java @@ -0,0 +1,80 @@ +package resilience.logging; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.internal.net.Communication; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.util.Collections; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RequestLoggingTest extends SingleServerTest { + private final static ObjectMapper mapper = new ObjectMapper(); + + public RequestLoggingTest() { + super(Collections.singletonMap(Communication.class, Level.DEBUG)); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void requestLogging(Protocol protocol) { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(protocol) + .build(); + + adb.db().query("RETURN \"hello\"", String.class).next(); + + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals(Communication.class.getName())) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> { + assertThat(it).contains("Send Request"); + assertThat(reqId(it)).isEqualTo(0); + assertThat(meta(it)) + .contains("requestType=POST") + .contains("database='_system'") + .contains("url='/_api/cursor'") + .doesNotContainIgnoringCase("authorization"); + assertThat(body(it)) + 
.containsEntry("query", "RETURN \"hello\""); + }) + .anySatisfy(it -> { + assertThat(it).contains("Received Response"); + assertThat(reqId(it)).isEqualTo(0); + assertThat(meta(it)).contains("statusCode=201"); + assertThat(body(it)) + .containsEntry("code", 201) + .containsEntry("result", Collections.singletonList("hello")); + }); + + adb.shutdown(); + } + + private Integer reqId(String log) { + return Integer.parseInt(log.substring(log.indexOf("[id=") + 4, log.indexOf("]"))); + } + + private String meta(String log) { + int endIdx = log.indexOf("} {") + 1; + if (endIdx == 0) { + endIdx = log.length(); + } + return log.substring(log.indexOf("]: ") + 3, endIdx); + } + + @SuppressWarnings("unchecked") + private Map body(String log) throws JsonProcessingException { + return mapper.readValue(log.substring(log.indexOf("} {") + 2), Map.class); + } + +} diff --git a/test-resilience/src/test/java/resilience/mock/SerdeTest.java b/test-resilience/src/test/java/resilience/mock/SerdeTest.java new file mode 100644 index 000000000..c0285b4be --- /dev/null +++ b/test-resilience/src/test/java/resilience/mock/SerdeTest.java @@ -0,0 +1,166 @@ +package resilience.mock; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDBException; +import com.arangodb.Request; +import com.arangodb.Response; +import com.arangodb.entity.MultiDocumentEntity; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.Test; +import resilience.MockTest; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.mockserver.model.HttpRequest.request; +import static org.mockserver.model.HttpResponse.response; + +public class SerdeTest extends MockTest { + + @Test + void unparsableData() { + 
arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withBody("upstream timed out") + ); + + logs.reset(); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("[Unparsable data]") + .hasMessageContaining("Response: {statusCode=504,"); + Throwable[] suppressed = thrown.getCause().getSuppressed(); + assertThat(suppressed).hasSize(1); + assertThat(suppressed[0]) + .isInstanceOf(ArangoDBException.class) + .cause() + .isInstanceOf(JsonParseException.class); + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.DEBUG)) + .anySatisfy(e -> assertThat(e.getFormattedMessage()) + .contains("Received Response") + .contains("statusCode=504") + .contains("[Unparsable data]") + ); + } + + @Test + void textPlainData() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withHeader("Content-Type", "text/plain") + .withBody("upstream timed out") + ); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("upstream timed out"); + } + + @Test + void textPlainDataWithCharset() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withHeader("Content-Type", "text/plain; charset=utf-8") + .withBody("upstream timed out") + ); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("upstream timed out"); + } + + @Test + void getDocumentsWithErrorField() { + List keys = Arrays.asList("1", "2", "3"); + + String resp = "[" + + 
"{\"error\":true,\"_key\":\"1\",\"_id\":\"col/1\",\"_rev\":\"_i4otI-q---\"}," + + "{\"_key\":\"2\",\"_id\":\"col/2\",\"_rev\":\"_i4otI-q--_\"}," + + "{\"_key\":\"3\",\"_id\":\"col/3\",\"_rev\":\"_i4otI-q--A\"}" + + "]"; + + mockServer + .when( + request() + .withMethod("PUT") + .withPath("/.*/_api/document/col") + .withQueryStringParameter("onlyget", "true") + ) + .respond( + response() + .withStatusCode(200) + .withHeader("Content-Type", "application/json; charset=utf-8") + .withBody(resp.getBytes(StandardCharsets.UTF_8)) + ); + + MultiDocumentEntity res = arangoDB.db().collection("col").getDocuments(keys, JsonNode.class); + assertThat(res.getErrors()).isEmpty(); + assertThat(res.getDocuments()).hasSize(3) + .anySatisfy(d -> assertThat(d.get("_key").textValue()).isEqualTo("1")) + .anySatisfy(d -> assertThat(d.get("_key").textValue()).isEqualTo("2")) + .anySatisfy(d -> assertThat(d.get("_key").textValue()).isEqualTo("3")); + } + + @Test + void getXArangoDumpJsonLines() { + String resp = "{\"a\":1}\n" + + "{\"b\":2}\n" + + "{\"c\":3}"; + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/_db/foo/_api/foo") + ) + .respond( + response() + .withStatusCode(200) + .withHeader("Content-Type", "application/x-arango-dump; charset=utf-8") + .withBody(resp.getBytes(StandardCharsets.UTF_8)) + ); + + Response res = arangoDB.execute(Request.builder() + .method(Request.Method.GET) + .db("foo") + .path("/_api/foo") + .build(), RawJson.class); + assertThat(res.getBody().get()).endsWith("{\"c\":3}"); + } +} diff --git a/test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java b/test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java new file mode 100644 index 000000000..358311cf4 --- /dev/null +++ b/test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java @@ -0,0 +1,65 @@ +package resilience.mock; + +import ch.qos.logback.classic.Level; +import org.junit.jupiter.api.Test; +import org.mockserver.matchers.Times; 
+import resilience.MockTest; + +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockserver.model.HttpRequest.request; +import static org.mockserver.model.HttpResponse.response; + +class ServiceUnavailableTest extends MockTest { + + @Test + void retryOn503() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version"), + Times.exactly(2) + ) + .respond( + response() + .withStatusCode(503) + .withBody("{\"error\":true,\"errorNum\":503,\"errorMessage\":\"boom\",\"code\":503}") + ); + + logs.reset(); + arangoDB.getVersion(); + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + } + + @Test + void retryOn503Async() throws ExecutionException, InterruptedException { + arangoDB.async().getVersion().get(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version"), + Times.exactly(2) + ) + .respond( + response() + .withStatusCode(503) + .withBody("{\"error\":true,\"errorNum\":503,\"errorMessage\":\"boom\",\"code\":503}") + ); + + logs.reset(); + arangoDB.async().getVersion().get(); + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + } + + +} diff --git a/test-resilience/src/test/java/resilience/protocol/ProtocolTest.java b/test-resilience/src/test/java/resilience/protocol/ProtocolTest.java new file mode 100644 index 000000000..a242b76a9 --- /dev/null +++ b/test-resilience/src/test/java/resilience/protocol/ProtocolTest.java @@ -0,0 +1,70 @@ +package resilience.protocol; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.vst.internal.VstConnection; +import io.netty.handler.codec.http2.Http2FrameLogger; +import 
io.netty.handler.logging.LoggingHandler; +import org.junit.jupiter.api.*; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.TestUtils; +import resilience.utils.MemoryAppender; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class ProtocolTest extends TestUtils { + private static final Map, Level> logLevels = new HashMap<>(); + + static { + logLevels.put(VstConnection.class, Level.DEBUG); + logLevels.put(LoggingHandler.class, Level.DEBUG); + logLevels.put(Http2FrameLogger.class, Level.DEBUG); + } + + private MemoryAppender logs; + + public ProtocolTest() { + super(logLevels); + } + + @BeforeEach + void init() { + logs = new MemoryAppender(); + } + + @AfterEach + void shutdown() { + logs.stop(); + } + + static Stream args() { + return Stream.of( + Arguments.of(Protocol.VST, "VstConnection"), + Arguments.of(Protocol.HTTP_JSON, "LoggingHandler"), + Arguments.of(Protocol.HTTP2_JSON, "Http2FrameLogger") + ); + } + + @ParameterizedTest + @MethodSource("args") + void shouldUseConfiguredProtocol(Protocol p, String expectedLog) { + assumeTrue(!p.equals(Protocol.VST) || isLessThanVersion(3, 12)); + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocol(p) + .build(); + adb.getVersion(); + assertThat(logs.getLogs()).anyMatch(it -> it.getLoggerName().contains(expectedLog)); + adb.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/retry/RetriableCursorClusterTest.java b/test-resilience/src/test/java/resilience/retry/RetriableCursorClusterTest.java new file mode 100644 index 000000000..c9802933c --- /dev/null +++ b/test-resilience/src/test/java/resilience/retry/RetriableCursorClusterTest.java @@ -0,0 +1,92 @@ +package 
resilience.retry; + +import com.arangodb.*; +import com.arangodb.model.AqlQueryOptions; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Michele Rastelli + */ +class RetriableCursorClusterTest extends ClusterTest { + + static Stream arangoProvider() { + return builderProvider().map(it -> it.timeout(1_000).build()); + } + + static Stream asyncArangoProvider() { + return arangoProvider().map(ArangoDB::async); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryCursor(ArangoDB arangoDB) throws IOException, InterruptedException { + + ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", + String.class, + new AqlQueryOptions().batchSize(1).allowRetry(true)); + + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("1"); + assertThat(cursor.hasNext()).isTrue(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + + Throwable thrown = catchThrowable(cursor::next); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + assertThat(cursor.next()).isEqualTo("2"); + assertThat(cursor.hasNext()).isFalse(); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void retryCursorAsync(ArangoDBAsync arangoDB) throws IOException, 
InterruptedException, ExecutionException { + + ArangoCursorAsync cursor = arangoDB.db() + .query("for i in 1..2 return i", + String.class, + new AqlQueryOptions().batchSize(1).allowRetry(true)).get(); + + assertThat(cursor.getResult()).containsExactly("1"); + assertThat(cursor.hasMore()).isTrue(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + + Throwable thrown = catchThrowable(() -> cursor.nextBatch().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + + ArangoCursorAsync c2 = cursor.nextBatch().get(); + assertThat(c2.getResult()).containsExactly("2"); + assertThat(c2.hasMore()).isFalse(); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } +} diff --git a/test-resilience/src/test/java/resilience/retry/RetriableCursorTest.java b/test-resilience/src/test/java/resilience/retry/RetriableCursorTest.java new file mode 100644 index 000000000..b440e8155 --- /dev/null +++ b/test-resilience/src/test/java/resilience/retry/RetriableCursorTest.java @@ -0,0 +1,79 @@ +package resilience.retry; + +import com.arangodb.*; +import com.arangodb.model.AqlQueryOptions; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Michele Rastelli + */ +class RetriableCursorTest extends SingleServerTest { + + static Stream 
arangoProvider() { + return builderProvider().map(it -> it.timeout(500).build()); + } + + static Stream asyncArangoProvider() { + return arangoProvider().map(ArangoDB::async); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("arangoProvider") + void retryCursor(ArangoDB arangoDB) throws IOException, InterruptedException { + try (ArangoCursor cursor = arangoDB.db() + .query("for i in 1..2 return i", + String.class, + new AqlQueryOptions().batchSize(1).allowRetry(true))) { + + assertThat(cursor.hasNext()).isTrue(); + assertThat(cursor.next()).isEqualTo("1"); + assertThat(cursor.hasNext()).isTrue(); + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + Throwable thrown = catchThrowable(cursor::next); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOfAny(TimeoutException.class); + toxic.remove(); + assertThat(cursor.next()).isEqualTo("2"); + assertThat(cursor.hasNext()).isFalse(); + } + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncArangoProvider") + void retryCursorAsync(ArangoDBAsync arangoDB) throws IOException, ExecutionException, InterruptedException { + ArangoCursorAsync c1 = arangoDB.db() + .query("for i in 1..2 return i", + String.class, + new AqlQueryOptions().batchSize(1).allowRetry(true)).get(); + + assertThat(c1.getResult()).containsExactly("1"); + assertThat(c1.hasMore()).isTrue(); + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + Throwable thrown = catchThrowable(() -> c1.nextBatch().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOfAny(TimeoutException.class); + toxic.remove(); + Thread.sleep(100); + ArangoCursorAsync c2 = c1.nextBatch().get(); + assertThat(c2.getResult()).containsExactly("2"); + 
assertThat(c2.hasMore()).isFalse(); + c2.close(); + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/retry/RetryClusterTest.java b/test-resilience/src/test/java/resilience/retry/RetryClusterTest.java new file mode 100644 index 000000000..d2f44eba2 --- /dev/null +++ b/test-resilience/src/test/java/resilience/retry/RetryClusterTest.java @@ -0,0 +1,247 @@ +package resilience.retry; + +import ch.qos.logback.classic.Level; +import com.arangodb.*; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import io.vertx.core.http.HttpClosedException; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.io.IOException; +import java.net.ConnectException; +import java.util.concurrent.*; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class RetryClusterTest extends ClusterTest { + + /** + * on reconnection failure: - 3x logs WARN Could not connect to host[addr=127.0.0.1,port=8529] - + * ArangoDBException("Cannot contact any host") + *

+ * once the proxy is re-enabled: - the subsequent requests should be successful + */ + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void unreachableHost(ArangoDB arangoDB) { + arangoDB.getVersion(); + disableAllEndpoints(); + + for (int i = 0; i < 10; i++) { + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + } + + long warnsCount = logs.getLogs() + .filter(e -> e.getLevel().equals(Level.WARN)) + .filter(e -> e.getFormattedMessage().contains("Could not connect to host[addr=127.0.0.1,port=18529]")) + .count(); + assertThat(warnsCount).isGreaterThanOrEqualTo(3); + + enableAllEndpoints(); + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + /** + * on reconnection failure: - 3x logs WARN Could not connect to host[addr=127.0.0.1,port=8529] - + * ArangoDBException("Cannot contact any host") + *

+ * once the proxy is re-enabled: - the subsequent requests should be successful + */ + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void unreachableHostAsync(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + arangoDB.getVersion().get(); + disableAllEndpoints(); + + for (int i = 0; i < 10; i++) { + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + } + + long warnsCount = logs.getLogs() + .filter(e -> e.getLevel().equals(Level.WARN)) + .filter(e -> e.getFormattedMessage().contains("Could not connect to host[addr=127.0.0.1,port=18529]")) + .count(); + assertThat(warnsCount).isGreaterThanOrEqualTo(3); + + enableAllEndpoints(); + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void unreachableHostFailover(ArangoDB arangoDB) { + arangoDB.getVersion(); + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.getVersion(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + enableAllEndpoints(); + arangoDB.shutdown(); + } + + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void unreachableHostFailoverAsync(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + arangoDB.getVersion().get(); + getEndpoints().get(0).disableNow(); + getEndpoints().get(1).disableNow(); + + arangoDB.getVersion().get(); + + assertThat(logs.getLogs()) + 
.filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + enableAllEndpoints(); + arangoDB.shutdown(); + } + + + @ParameterizedTest + @MethodSource("protocolProvider") + void retryGetOnClosedConnection(Protocol protocol) throws IOException, InterruptedException { + assumeTrue(protocol != Protocol.VST); + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + arangoDB.getVersion(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void retryGetOnClosedConnectionAsync(Protocol protocol) throws IOException, InterruptedException, ExecutionException { + assumeTrue(protocol != Protocol.VST); + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + arangoDB.getVersion().get(); + + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.WARN)) + .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } + + + /** + * on closed pending requests of unsafe HTTP methods: - no retry should happen + *

+ * the subsequent requests should fail over to a different coordinator and be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void notRetryPostOnClosedConnection(Protocol protocol) throws IOException, InterruptedException { + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + arangoDB.db().query("return null", Void.class); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return null", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + if (protocol != Protocol.VST) { + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + } + + arangoDB.db().query("return null", Void.class); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } + + /** + * on closed pending requests of unsafe HTTP methods: - no retry should happen + *

+ * the subsequent requests should fail over to a different coordinator and be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void notRetryPostOnClosedConnectionAsync(Protocol protocol) throws IOException, InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.db().query("return null", Void.class).get(); + + // slow down the driver connection + Latency toxic = getEndpoints().get(0).getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoints().get(0).disable(300); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return null", Void.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + if (protocol != Protocol.VST) { + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + } + + arangoDB.db().query("return null", Void.class).get(); + + toxic.remove(); + enableAllEndpoints(); + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/retry/RetryTest.java b/test-resilience/src/test/java/resilience/retry/RetryTest.java new file mode 100644 index 000000000..81875c4aa --- /dev/null +++ b/test-resilience/src/test/java/resilience/retry/RetryTest.java @@ -0,0 +1,315 @@ +package resilience.retry; + +import ch.qos.logback.classic.Level; +import com.arangodb.*; +import io.vertx.core.http.HttpClosedException; +import resilience.SingleServerTest; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.net.ConnectException; +import java.util.List; +import java.util.concurrent.*; + +import static org.assertj.core.api.Assertions.assertThat; +import static 
org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class RetryTest extends SingleServerTest { + + /** + * on reconnection failure: - 3x logs WARN Could not connect to host[addr=127.0.0.1,port=8529] - + * ArangoDBException("Cannot contact any host") + *

+ * once the proxy is re-enabled: - the subsequent requests should be successful + */ + @ParameterizedTest(name = "{index}") + @MethodSource("adbProvider") + void unreachableHost(ArangoDB arangoDB) { + arangoDB.getVersion(); + getEndpoint().disableNow(); + + for (int i = 0; i < 10; i++) { + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + } + + long warnsCount = logs.getLogs() + .filter(e -> e.getLevel().equals(Level.WARN)) + .filter(e -> e.getFormattedMessage().contains("Could not connect to host[addr=127.0.0.1,port=18529]")) + .count(); + assertThat(warnsCount).isGreaterThanOrEqualTo(3); + + getEndpoint().enable(); + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + /** + * on reconnection failure: - 3x logs WARN Could not connect to host[addr=127.0.0.1,port=8529] - + * ArangoDBException("Cannot contact any host") + *

+ * once the proxy is re-enabled: - the subsequent requests should be successful + */ + @ParameterizedTest(name = "{index}") + @MethodSource("asyncAdbProvider") + void unreachableHostAsync(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { + arangoDB.getVersion().get(); + getEndpoint().disableNow(); + + for (int i = 0; i < 10; i++) { + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("Cannot contact any host"); + assertThat(thrown.getCause()).isNotNull(); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + ((ArangoDBMultipleException) thrown.getCause()).getExceptions().forEach(e -> + assertThat(e).isInstanceOf(ConnectException.class)); + } + + long warnsCount = logs.getLogs() + .filter(e -> e.getLevel().equals(Level.WARN)) + .filter(e -> e.getFormattedMessage().contains("Could not connect to host[addr=127.0.0.1,port=18529]")) + .count(); + assertThat(warnsCount).isGreaterThanOrEqualTo(3); + + getEndpoint().enable(); + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + /** + * on delayed response: + * - ArangoDBException with cause TimeoutException + *

+ * once the delay is removed: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void connectionTimeout(Protocol protocol) throws IOException, InterruptedException { + ArangoDB arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + toxic.remove(); + Thread.sleep(100); + + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + /** + * on delayed response: + * - ArangoDBException with cause TimeoutException + *

+ * once the delay is removed: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void connectionTimeoutAsync(Protocol protocol) throws IOException, InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + toxic.remove(); + Thread.sleep(100); + + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + + /** + * on closed pending requests of safe HTTP methods: + *

+ * - retry 3 times + * - ArangoDBMultipleException with 3 exceptions + *

+ * once restored: + *

+ * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void retryGetOnClosedConnection(Protocol protocol) throws IOException, InterruptedException { + assumeTrue(protocol != Protocol.VST); + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoint().disable(300); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + List exceptions = ((ArangoDBMultipleException) thrown.getCause()).getExceptions(); + assertThat(exceptions).hasSize(3); + assertThat(exceptions.get(0)).isInstanceOf(IOException.class); + assertThat(exceptions.get(0).getCause()).isInstanceOf(HttpClosedException.class); + assertThat(exceptions.get(1)).isInstanceOf(ConnectException.class); + assertThat(exceptions.get(2)).isInstanceOf(ConnectException.class); + + toxic.remove(); + getEndpoint().enable(); + + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + /** + * on closed pending requests of safe HTTP methods: + *

+ * - retry 3 times + * - ArangoDBMultipleException with 3 exceptions + *

+ * once restored: + *

+ * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void retryGetOnClosedConnectionAsync(Protocol protocol) throws IOException, InterruptedException, ExecutionException { + assumeTrue(protocol != Protocol.VST); + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoint().disable(300); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(ArangoDBMultipleException.class); + List exceptions = ((ArangoDBMultipleException) thrown.getCause()).getExceptions(); + assertThat(exceptions).hasSize(3); + assertThat(exceptions.get(0)).isInstanceOf(IOException.class); + assertThat(exceptions.get(0).getCause()).isInstanceOf(HttpClosedException.class); + assertThat(exceptions.get(1)).isInstanceOf(ConnectException.class); + assertThat(exceptions.get(2)).isInstanceOf(ConnectException.class); + + toxic.remove(); + getEndpoint().enable(); + + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + + + /** + * on closed pending requests of unsafe HTTP methods: - no retry should happen + *

+ * once restored: - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void notRetryPostOnClosedConnection(Protocol protocol) throws IOException, InterruptedException { + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + arangoDB.db().query("return null", Void.class); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoint().disable(300); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return null", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + if (protocol != Protocol.VST) { + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + } + + toxic.remove(); + getEndpoint().enable(); + + arangoDB.db().query("return null", Void.class); + arangoDB.shutdown(); + } + + /** + * on closed pending requests of unsafe HTTP methods: - no retry should happen + *

+ * once restored: - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void notRetryPostOnClosedConnectionAsync(Protocol protocol) throws IOException, InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.db().query("return null", Void.class).get(); + + // slow down the driver connection + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + Thread.sleep(100); + + getEndpoint().disable(300); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return null", Void.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + if (protocol != Protocol.VST) { + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + } + + toxic.remove(); + getEndpoint().enable(); + + arangoDB.db().query("return null", Void.class).get(); + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/shutdown/ShutdownClusterTest.java b/test-resilience/src/test/java/resilience/shutdown/ShutdownClusterTest.java new file mode 100644 index 000000000..0ff83e7c6 --- /dev/null +++ b/test-resilience/src/test/java/resilience/shutdown/ShutdownClusterTest.java @@ -0,0 +1,93 @@ +package resilience.shutdown; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.Protocol; +import io.vertx.core.http.HttpClosedException; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static 
org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class ShutdownClusterTest extends ClusterTest { + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdown(Protocol protocol) throws InterruptedException { + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + arangoDB.shutdown(); + Thread.sleep(500); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("closed"); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdownAsync(Protocol protocol) throws InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + arangoDB.shutdown(); + Thread.sleep(500); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("closed"); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdownWithPendingRequests(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor(); + es.schedule(arangoDB::shutdown, 500, TimeUnit.MILLISECONDS); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return sleep(1)", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + es.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void 
shutdownWithPendingRequestsAsync(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor(); + es.schedule(arangoDB::shutdown, 500, TimeUnit.MILLISECONDS); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return sleep(1)", Void.class).get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + es.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/shutdown/ShutdownTest.java b/test-resilience/src/test/java/resilience/shutdown/ShutdownTest.java new file mode 100644 index 000000000..4132f6036 --- /dev/null +++ b/test-resilience/src/test/java/resilience/shutdown/ShutdownTest.java @@ -0,0 +1,93 @@ +package resilience.shutdown; + +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.ArangoDBException; +import com.arangodb.Protocol; +import io.vertx.core.http.HttpClosedException; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class ShutdownTest extends SingleServerTest { + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdown(Protocol protocol) throws InterruptedException { + ArangoDB arangoDB = dbBuilder() + 
.protocol(protocol) + .build(); + + arangoDB.getVersion(); + arangoDB.shutdown(); + Thread.sleep(500); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("closed"); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdownAsync(Protocol protocol) throws InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + arangoDB.shutdown(); + Thread.sleep(500); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion().get()).getCause(); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getMessage()).contains("closed"); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdownWithPendingRequests(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + ArangoDB arangoDB = dbBuilder() + .protocol(protocol) + .build(); + + ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor(); + es.schedule(arangoDB::shutdown, 500, TimeUnit.MILLISECONDS); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return sleep(1)", Void.class)); + assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + es.shutdown(); + } + + @ParameterizedTest + @MethodSource("protocolProvider") + void shutdownWithPendingRequestsAsync(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + ArangoDBAsync arangoDB = dbBuilder() + .protocol(protocol) + .build() + .async(); + + ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor(); + es.schedule(arangoDB::shutdown, 500, TimeUnit.MILLISECONDS); + Throwable thrown = catchThrowable(() -> arangoDB.db().query("return sleep(1)", Void.class).get()).getCause(); + 
assertThat(thrown).isInstanceOf(ArangoDBException.class); + assertThat(thrown.getCause()).isInstanceOf(IOException.class); + assertThat(thrown.getCause().getCause()).isInstanceOf(HttpClosedException.class); + es.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/timeout/TimeoutClusterTest.java b/test-resilience/src/test/java/resilience/timeout/TimeoutClusterTest.java new file mode 100644 index 000000000..fa80f1364 --- /dev/null +++ b/test-resilience/src/test/java/resilience/timeout/TimeoutClusterTest.java @@ -0,0 +1,104 @@ +package resilience.timeout; + +import com.arangodb.*; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.ClusterTest; + +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Michele Rastelli + */ +class TimeoutClusterTest extends ClusterTest { + + /** + * on timeout failure: + * - throw exception + * - expect operation performed (at most) once + *

+ * after the exception: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void requestTimeout(Protocol protocol) throws InterruptedException { + ArangoDB arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + String colName = "timeoutTest"; + ArangoCollection col = arangoDB.db().collection(colName); + if (!col.exists()) col.create(); + col.truncate(); + + Throwable thrown = catchThrowable(() -> arangoDB.db() + .query("INSERT {value:sleep(1)} INTO @@col RETURN NEW", + Map.class, + Collections.singletonMap("@col", colName)) + ); + + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + arangoDB.getVersion(); + + Thread.sleep(1_000); + assertThat(col.count().getCount()).isEqualTo(1); + + arangoDB.shutdown(); + } + + /** + * on timeout failure: + * - throw exception + * - expect operation performed (at most) once + *

+ * after the exception: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void requestTimeoutAsync(Protocol protocol) throws InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + String colName = "timeoutTest"; + ArangoCollectionAsync col = arangoDB.db().collection(colName); + if (!col.exists().get()) col.create().get(); + col.truncate().get(); + + Throwable thrown = catchThrowable(() -> arangoDB.db() + .query("INSERT {value:sleep(1)} INTO @@col RETURN NEW", + Map.class, + Collections.singletonMap("@col", colName)).get() + ).getCause(); + + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + arangoDB.getVersion().get(); + + Thread.sleep(1_000); + assertThat(col.count().get().getCount()).isEqualTo(1); + + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/timeout/TimeoutTest.java b/test-resilience/src/test/java/resilience/timeout/TimeoutTest.java new file mode 100644 index 000000000..00f0f6aab --- /dev/null +++ b/test-resilience/src/test/java/resilience/timeout/TimeoutTest.java @@ -0,0 +1,104 @@ +package resilience.timeout; + +import com.arangodb.*; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * @author Michele Rastelli + */ +class TimeoutTest extends SingleServerTest { + + /** + * on timeout failure: + * - throw exception + * - expect operation performed (at most) once + *

+ * after the exception: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void requestTimeout(Protocol protocol) throws InterruptedException { + ArangoDB arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build(); + + arangoDB.getVersion(); + String colName = "timeoutTest"; + ArangoCollection col = arangoDB.db().collection(colName); + if (!col.exists()) col.create(); + col.truncate(); + + Throwable thrown = catchThrowable(() -> arangoDB.db() + .query("INSERT {value:sleep(1)} INTO @@col RETURN NEW", + Map.class, + Collections.singletonMap("@col", colName)) + ); + + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + arangoDB.getVersion(); + + Thread.sleep(1_000); + assertThat(col.count().getCount()).isEqualTo(1); + + arangoDB.shutdown(); + } + + /** + * on timeout failure: + * - throw exception + * - expect operation performed (at most) once + *

+ * after the exception: + * - the subsequent requests should be successful + */ + @ParameterizedTest + @MethodSource("protocolProvider") + void requestTimeoutAsync(Protocol protocol) throws InterruptedException, ExecutionException { + ArangoDBAsync arangoDB = dbBuilder() + .timeout(500) + .protocol(protocol) + .build() + .async(); + + arangoDB.getVersion().get(); + String colName = "timeoutTest"; + ArangoCollectionAsync col = arangoDB.db().collection(colName); + if (!col.exists().get()) col.create().get(); + col.truncate().get(); + + Throwable thrown = catchThrowable(() -> arangoDB.db() + .query("INSERT {value:sleep(1)} INTO @@col RETURN NEW", + Map.class, + Collections.singletonMap("@col", colName)).get() + ).getCause(); + + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .extracting(Throwable::getCause) + .isInstanceOf(TimeoutException.class); + + arangoDB.getVersion().get(); + + Thread.sleep(1_000); + assertThat(col.count().get().getCount()).isEqualTo(1); + + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/ttl/TtlTest.java b/test-resilience/src/test/java/resilience/ttl/TtlTest.java new file mode 100644 index 000000000..17853836a --- /dev/null +++ b/test-resilience/src/test/java/resilience/ttl/TtlTest.java @@ -0,0 +1,83 @@ +package resilience.ttl; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.ArangoDBAsync; +import com.arangodb.Protocol; +import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.logging.LoggingHandler; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import resilience.SingleServerTest; + +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +/** + * @author Michele 
Rastelli + */ +class TtlTest extends SingleServerTest { + private static final Map, Level> logLevels = new HashMap<>(); + + static { + logLevels.put(LoggingHandler.class, Level.DEBUG); + logLevels.put(Http2FrameLogger.class, Level.DEBUG); + } + + static Stream args() { + return Stream.of( + Arguments.of(Protocol.HTTP_JSON, "UNREGISTERED"), + Arguments.of(Protocol.HTTP2_JSON, "OUTBOUND GO_AWAY") + ); + } + + public TtlTest() { + super(logLevels); + } + + @ParameterizedTest + @MethodSource("args") + void connectionTtl(Protocol p, String expectedLog) { + ArangoDB arangoDB = dbBuilder() + .connectionTtl(1_000L) + .maxConnections(1) + .protocol(p) + .build(); + + arangoDB.getVersion(); + + await() + .timeout(Duration.ofSeconds(3)) + .until(() -> logs.getLogs().anyMatch(it -> it.getFormattedMessage().contains(expectedLog))); + + arangoDB.getVersion(); + arangoDB.shutdown(); + } + + @ParameterizedTest + @MethodSource("args") + void connectionTtlAsync(Protocol p, String expectedLog) throws ExecutionException, InterruptedException { + ArangoDBAsync arangoDB = dbBuilder() + .connectionTtl(1_000L) + .maxConnections(1) + .protocol(p) + .build() + .async(); + + arangoDB.getVersion().get(); + + await() + .timeout(Duration.ofSeconds(3)) + .until(() -> logs.getLogs().anyMatch(it -> it.getFormattedMessage().contains(expectedLog))); + + arangoDB.getVersion().get(); + arangoDB.shutdown(); + } + +} diff --git a/test-resilience/src/test/java/resilience/utils/MemoryAppender.java b/test-resilience/src/test/java/resilience/utils/MemoryAppender.java new file mode 100644 index 000000000..b09e987d8 --- /dev/null +++ b/test-resilience/src/test/java/resilience/utils/MemoryAppender.java @@ -0,0 +1,29 @@ +package resilience.utils; + +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.read.ListAppender; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import 
java.util.stream.Stream; + +public class MemoryAppender extends ListAppender { + + public MemoryAppender() { + setContext((LoggerContext) LoggerFactory.getILoggerFactory()); + start(); + Logger logger = (Logger) LoggerFactory.getLogger("root"); + logger.addAppender(this); + } + + public void reset() { + list.clear(); + } + + public Stream getLogs() { + // avoid concurrent modification exceptions + return new ArrayList<>(list).stream(); + } +} \ No newline at end of file diff --git a/test-resilience/src/test/java/resilience/vertx/VertxTest.java b/test-resilience/src/test/java/resilience/vertx/VertxTest.java new file mode 100644 index 000000000..5e3342b09 --- /dev/null +++ b/test-resilience/src/test/java/resilience/vertx/VertxTest.java @@ -0,0 +1,115 @@ +package resilience.vertx; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.arangodb.ArangoDB; +import com.arangodb.PackageVersion; +import com.arangodb.http.HttpConnection; +import com.arangodb.http.HttpProtocolConfig; +import io.vertx.core.Vertx; +import org.junit.jupiter.api.Test; +import resilience.SingleServerTest; + +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class VertxTest extends SingleServerTest { + + public VertxTest() { + super(Collections.singletonMap(HttpConnection.class, Level.DEBUG)); + } + + @Test + void managedVertx() { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .build(); + + adb.getVersion(); + adb.shutdown(); + + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals("com.arangodb.http.HttpConnection")) + .filteredOn(it -> it.getLevel().equals(Level.DEBUG)) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> assertThat(it).contains("Creating new Vert.x instance")) + .anySatisfy(it -> assertThat(it).contains("Closing Vert.x instance")); + } + + @Test + void 
reuseVertx() { + Vertx vertx = Vertx.vertx(); + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocolConfig(HttpProtocolConfig.builder().vertx(vertx).build()) + .build(); + adb.getVersion(); + adb.shutdown(); + vertx.close(); + + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals("com.arangodb.http.HttpConnection")) + .filteredOn(it -> it.getLevel().equals(Level.DEBUG)) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> assertThat(it).contains("Reusing existing Vert.x instance")); + } + + @Test + void reuseVertxFromVertxThread() throws ExecutionException, InterruptedException { + Vertx vertx = Vertx.vertx(); + vertx.executeBlocking(() -> { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .protocolConfig(HttpProtocolConfig.builder().vertx(Vertx.currentContext().owner()).build()) + .build(); + adb.getVersion(); + adb.shutdown(); + return null; + }).toCompletionStage().toCompletableFuture().get(); + vertx.close(); + + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals("com.arangodb.http.HttpConnection")) + .filteredOn(it -> it.getLevel().equals(Level.DEBUG)) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> assertThat(it).contains("Reusing existing Vert.x instance")); + } + + @Test + void existingVertxNotUsed() throws ExecutionException, InterruptedException { + Vertx vertx = Vertx.vertx(); + vertx.executeBlocking(() -> { + ArangoDB adb = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .build(); + adb.getVersion(); + adb.shutdown(); + return null; + }).toCompletionStage().toCompletableFuture().get(); + vertx.close(); + + if (!PackageVersion.SHADED) { + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals("com.arangodb.http.HttpConnectionFactory")) + .filteredOn(it -> it.getLevel().equals(Level.WARN)) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> 
assertThat(it) + .contains("Found an existing Vert.x instance, you can reuse it by setting:") + .contains(".protocolConfig(HttpProtocolConfig.builder().vertx(Vertx.currentContext().owner()).build())") + ); + } + assertThat(logs.getLogs()) + .filteredOn(it -> it.getLoggerName().equals("com.arangodb.http.HttpConnection")) + .filteredOn(it -> it.getLevel().equals(Level.DEBUG)) + .map(ILoggingEvent::getFormattedMessage) + .anySatisfy(it -> assertThat(it).contains("Creating new Vert.x instance")) + .anySatisfy(it -> assertThat(it).contains("Closing Vert.x instance")); + } + +} diff --git a/test-resilience/src/test/java/resilience/vstKeepAlive/VstKeepAliveCloseTest.java b/test-resilience/src/test/java/resilience/vstKeepAlive/VstKeepAliveCloseTest.java new file mode 100644 index 000000000..c38309a12 --- /dev/null +++ b/test-resilience/src/test/java/resilience/vstKeepAlive/VstKeepAliveCloseTest.java @@ -0,0 +1,89 @@ +package resilience.vstKeepAlive; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import resilience.SingleServerTest; +import eu.rekawek.toxiproxy.model.ToxicDirection; +import eu.rekawek.toxiproxy.model.toxic.Latency; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.ExecutionException; + +import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * @author Michele Rastelli + */ +class VstKeepAliveCloseTest extends SingleServerTest { + + private ArangoDB arangoDB; + + @BeforeEach + void init() { + assumeTrue(isLessThanVersion(3, 12)); + arangoDB = dbBuilder() + .protocol(Protocol.VST) + .timeout(1000) + .keepAliveInterval(1) + .build(); + } + + @AfterEach + void shutDown() { + if (arangoDB != null) { + arangoDB.shutdown(); + } + } + + /** + * after 3 
consecutive VST keepAlive failures: + * - log ERROR Connection unresponsive + * - reconnect on next request + */ + @Test + @Timeout(10) + void keepAliveCloseAndReconnect() throws IOException, InterruptedException { + arangoDB.getVersion(); + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + await() + .timeout(Duration.ofSeconds(3)) + .until(() -> logs.getLogs() + .filter(e -> e.getLevel().equals(Level.ERROR)) + .filter(e -> e.getFormattedMessage() != null) + .anyMatch(e -> e.getFormattedMessage().contains("Connection unresponsive!"))); + toxic.setLatency(0); + toxic.remove(); + Thread.sleep(100); + arangoDB.getVersion(); + } + + /** + * after 3 consecutive VST keepAlive failures: + * - log ERROR Connection unresponsive + * - reconnect on next request + */ + @Test + @Timeout(10) + void keepAliveCloseAndReconnectAsync() throws IOException, ExecutionException, InterruptedException { + arangoDB.async().getVersion().get(); + Latency toxic = getEndpoint().getProxy().toxics().latency("latency", ToxicDirection.DOWNSTREAM, 10_000); + await() + .timeout(Duration.ofSeconds(3)) + .until(() -> logs.getLogs() + .filter(e -> e.getLevel().equals(Level.ERROR)) + .filter(e -> e.getFormattedMessage() != null) + .anyMatch(e -> e.getFormattedMessage().contains("Connection unresponsive!"))); + toxic.setLatency(0); + toxic.remove(); + Thread.sleep(100); + arangoDB.async().getVersion().get(); + } + +} diff --git a/test-resilience/src/test/resources/example.truststore b/test-resilience/src/test/resources/example.truststore new file mode 100644 index 000000000..e683a48b8 Binary files /dev/null and b/test-resilience/src/test/resources/example.truststore differ diff --git a/test-resilience/src/test/resources/logback-test.xml b/test-resilience/src/test/resources/logback-test.xml new file mode 100644 index 000000000..f42e5d7f9 --- /dev/null +++ b/test-resilience/src/test/resources/logback-test.xml @@ -0,0 +1,13 @@ + + + + + %d{yyyy-MM-dd 
HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + diff --git a/tests/travis/server.pem b/tests/travis/server.pem deleted file mode 100644 index 108998782..000000000 --- a/tests/travis/server.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICKTCCAZICCQDrch/8O8H4PTANBgkqhkiG9w0BAQUFADBZMQswCQYDVQQGEwJE -RTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTIwNzI0MDYxNzUwWhcN -MTMwNzI0MDYxNzUwWjBZMQswCQYDVQQGEwJERTETMBEGA1UECAwKU29tZS1TdGF0 -ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQDDAls -b2NhbGhvc3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMDwRweF75H+BFBY -m9c4V/AZyWehxQ+pRfy6d+oPiUbOByqLDEQBdiMamQ0acJX9sn+MsFixEDRC1Y8r -ef49k1hb9V/pPtSFPVl5y8Db4FiDabKD9juUvNRqCXAzu99dwJwFZU2Ldq4BiCmf -8V0SO+nHJHs3+HNgbYU+g77Fax7dAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEASYfI -Afq5lRn+XoESn+PQ89XJ83Jr5mJhDkT7ebw+w1CQsnR9uYIeAYNxOyb3bfHOntnz -3irGYJWkrewVYjNkID8jhZSYEOZkfC4jxNLigg7QXyw/XWwwCIEr8/ZW+cnci43G -+IhSspyoJnljELM99ZsW0IaBLT+tgHzep4bIh0Y= ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICXgIBAAKBgQDA8EcHhe+R/gRQWJvXOFfwGclnocUPqUX8unfqD4lGzgcqiwxE -AXYjGpkNGnCV/bJ/jLBYsRA0QtWPK3n+PZNYW/Vf6T7UhT1ZecvA2+BYg2myg/Y7 -lLzUaglwM7vfXcCcBWVNi3auAYgpn/FdEjvpxyR7N/hzYG2FPoO+xWse3QIDAQAB -AoGBALsP+34C0M02gI456xbXDQPArLJqePp/P1kSnbL0zz80AGSyHflNhXHxltTK -fsAeElNyX4QOYEybRB2kpInov5KrL3lR926GY++otHpSAue+PfLJHH8OU8qjartw -KGlzdnFFYQVLNc0qYGl/h0MxH6+Ce8JV0y1zI4J3nwMwCZwBAkEA6uqluGj4SXIj -6W+FefuHlHXgFn+TZgsGNNmmCMhLJPUUP7auIagtG1n12mN47kWqhLu3asLEVq4+ -YnCEa0hF3QJBANJBKN/zyfmvkiET33cY03282gSN0YLyibMYoNDOOYMFEW2b+3oF -x0d40FfEkYS0R4rJiR1HI8xWw8uQEWsXLQECQQDRx1DN6Q8vpznij7BGShO8w7ak -4LAkA9w6/dGDLjnMev7mFqBRdfkx35foucOYI6YueeNE90CVl2wmRAw8MYIRAkEA -qUUgPTyuDMCWqt6u99ka243hD+2FESFmTrOzNKfykVLRNydvDEv+pcHUKfTtGqNx -PYEHTHTrkyT/OLFh9I+SAQJAOntw15vhgKiy0DUvjSvXGJziBDPsu/MCZ2CZbaVx -ipkUZQCZLd/HvMphAiirLV+mAJQC732KKlR9/HWRzDDFEQ== ------END RSA PRIVATE KEY----- diff --git 
a/tests/travis/setup_arangodb.sh b/tests/travis/setup_arangodb.sh deleted file mode 100755 index 01a736e57..000000000 --- a/tests/travis/setup_arangodb.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd $DIR - -VERSION=devel -NAME=ArangoDB-$VERSION - -if [ ! -d "$DIR/$NAME" ]; then - # download ArangoDB - echo "wget https://www.arangodb.com/repositories/travisCI/$NAME.tar.gz" - wget https://www.arangodb.com/repositories/travisCI/$NAME.tar.gz - echo "tar zxf $NAME.tar.gz" - tar zvxf $NAME.tar.gz -fi - -ARCH=$(arch) -PID=$(echo $PPID) -TMP_DIR="/tmp/arangodb.$PID" -PID_FILE="/tmp/arangodb.$PID.pid" -ARANGODB_DIR="$DIR/$NAME" -ARANGOD="${ARANGODB_DIR}/bin/arangod_x86_64" - -# create database directory -mkdir ${TMP_DIR} - -echo "Starting ArangoDB '${ARANGOD}'" - -${ARANGOD} \ - --database.directory ${TMP_DIR} \ - --configuration none \ - --server.endpoint tcp://127.0.0.1:8529 \ - --server.endpoint ssl://127.0.0.1:8530 \ - --ssl.keyfile ./server.pem \ - --javascript.app-path ${ARANGODB_DIR}/js/apps \ - --javascript.startup-directory ${ARANGODB_DIR}/js \ - --server.authentication=true & - -sleep 2 - -echo "Check for arangod process" -process=$(ps auxww | grep "bin/arangod" | grep -v grep) - -if [ "x$process" == "x" ]; then - echo "no 'arangod' process found" - echo "ARCH = $ARCH" - exit 1 -fi - -echo "Waiting until ArangoDB is ready on port 8529" -while [[ -z `curl -uroot: -s 'http://127.0.0.1:8529/_api/version' ` ]] ; do - echo -n "." 
- sleep 2s -done - -echo "ArangoDB is up" diff --git a/tutorial/.gitignore b/tutorial/.gitignore new file mode 100644 index 000000000..f631b0010 --- /dev/null +++ b/tutorial/.gitignore @@ -0,0 +1,32 @@ +# Compiled class file +*.class + +# Log file +*.log + +# BlueJ files +*.ctxt + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.nar +*.ear +*.zip +*.tar.gz +*.rar + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* +target +**/.idea +*.iml +**/.directory +/gradle/.gradle/ +/gradle/build/ +/gradle/gradle/ +/gradle/gradlew +/gradle/gradlew.bat diff --git a/tutorial/README.md b/tutorial/README.md new file mode 100644 index 000000000..95edb57f4 --- /dev/null +++ b/tutorial/README.md @@ -0,0 +1,4 @@ +# ArangoDB Java driver tutorial + +This folder contains the code for the +[Java driver tutorial](https://docs.arangodb.com/stable/develop/drivers/java/). diff --git a/tutorial/Tutorial.md b/tutorial/Tutorial.md new file mode 100644 index 000000000..fccf40d45 --- /dev/null +++ b/tutorial/Tutorial.md @@ -0,0 +1,474 @@ +# ArangoDB Java driver + +The official ArangoDB Java Driver. + +- Repository: +- [Code examples](https://github.com/arangodb/arangodb-java-driver/tree/main/test-non-functional/src/test/java/example) +- [Reference](reference-version-7/_index.md) (driver setup, serialization, changes in version 7) +- [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) (generated reference documentation) +- [ChangeLog](https://github.com/arangodb/arangodb-java-driver/blob/main/ChangeLog.md) + +## Supported versions + +Version 7 is the latest supported and actively developed release. + +The driver is compatible with all supported stable versions of ArangoDB server, see +[Product Support End-of-life Announcements](https://arangodb.com/subscriptions/end-of-life-notice/). + +The driver is compatible with JDK 8 and higher versions. 
+ +{{< warning >}} +Version 6 reached End of Life (EOL) and is not actively developed anymore. +Upgrading to version 7 is recommended. + +The API changes between version 6 and 7 are documented in +[Changes in version 7](reference-version-7/changes-in-version-7.md). +{{< /warning >}} + +## Project configuration + +To use the ArangoDB Java driver, you need to import `arangodb-java-driver` as a +library into your project. This is described below for the popular Java build +automation systems Maven and Gradle. + +### Maven + +To add the driver to your project with Maven, add the following code to your +`pom.xml` (substitute `7.x.x` with the latest driver version): + +```xml + + + com.arangodb + arangodb-java-driver + 7.x.x + + +``` + +### Gradle + +To add the driver to your project with Gradle, add the following code to your +`build.gradle` (substitute `7.x.x` with the latest driver version): + +```groovy +repositories { + mavenCentral() +} + +dependencies { + implementation 'com.arangodb:arangodb-java-driver:7.x.x' +} +``` + +## Tutorial + +### Connect to ArangoDB + +Let's configure and open a connection to ArangoDB. The default connection is to +`127.0.0.1:8529`. Change the connection details to point to your specific instance. + +```java +ArangoDB arangoDB = new ArangoDB.Builder() + .host("localhost", 8529) + .user("root") + .password("") + .build(); +``` + +For more connections options and details, see +[Driver setup](reference-version-7/driver-setup.md). + +### Create a database + +Let's create a new database: + +```java +ArangoDatabase db = arangoDB.db("mydb"); +System.out.println("Creating database..."); +db.create(); +``` + +### Create a collection + +Now let's create our first collection: + +```java +ArangoCollection collection = db.collection("firstCollection"); +System.out.println("Creating collection..."); +collection.create(); +``` + +### Create a document + +Let's create a document in the collection. 
Any object can be added as a document +to the database and be retrieved from the database as an object. + +This example uses the `BaseDocument` class, provided with the driver. The +attributes of the document are stored in a map as `key`/`value` pair: + +```java +String key = "myKey"; +BaseDocument doc = new BaseDocument(key); +doc.addAttribute("a", "Foo"); +doc.addAttribute("b", 42); +System.out.println("Inserting document..."); +collection.insertDocument(doc); +``` + +Some details you should know about the code: + +- The document key is passed to the `BaseDocument` constructor +- The `addAttribute()` method puts a new key/value pair into the document +- Each attribute is stored as a single key value pair in the document root + +### Read a document + +Read the created document: + +```java +System.out.println("Reading document..."); +BaseDocument readDocument = collection.getDocument(key, BaseDocument.class); +System.out.println("Key: " + readDocument.getKey()); +System.out.println("Attribute a: " + readDocument.getAttribute("a")); +System.out.println("Attribute b: " + readDocument.getAttribute("b")); +``` + +After executing this program, the console output should be: + +```text +Key: myKey +Attribute a: Foo +Attribute b: 42 +``` + +Some details you should know about the code: + +- The `getDocument()` method reads the stored document data and deserializes it + into the given class (`BaseDocument`) + +### Create a document from Jackson JsonNode + +You can also create a document from a Jackson +[JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html) +object: + +```java +System.out.println("Creating a document from Jackson JsonNode..."); +String keyJackson = "myJacksonKey"; +JsonNode jsonNode = JsonNodeFactory.instance.objectNode() + .put("_key", keyJackson) + .put("a", "Bar") + .put("b", 53); +System.out.println("Inserting document from Jackson JsonNode..."); +collection.insertDocument(jsonNode); +``` + +### 
Read a document as Jackson JsonNode + +You can also read a document as a Jackson +[JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html): + +```java +System.out.println("Reading document as Jackson JsonNode..."); +JsonNode readJsonNode = collection.getDocument(keyJackson, JsonNode.class); +System.out.println("Key: " + readJsonNode.get("_key").textValue()); +System.out.println("Attribute a: " + readJsonNode.get("a").textValue()); +System.out.println("Attribute b: " + readJsonNode.get("b").intValue()); +``` + +After executing this program, the console output should be: + +```text +Key: myJacksonKey +Attribute a: Bar +Attribute b: 53 +``` + +Some details you should know about the code: + +- The `getDocument()` method returns the stored document as instance of + `com.fasterxml.jackson.databind.JsonNode`. + +### Create a document from JSON String + +You can also create a document from raw JSON string: + +```java +System.out.println("Creating a document from JSON String..."); +String keyJson = "myJsonKey"; +RawJson json = RawJson.of("{\"_key\":\"" + keyJson + "\",\"a\":\"Baz\",\"b\":64}"); +System.out.println("Inserting document from JSON String..."); +collection.insertDocument(json); +``` + +### Read a document as JSON String + +You can also read a document as raw JSON string: + +```java +System.out.println("Reading document as JSON String..."); +RawJson readJson = collection.getDocument(keyJson, RawJson.class); +System.out.println(readJson.get()); +``` + +After executing this program, the console output should be: + +```text +{"_key":"myJsonKey","_id":"firstCollection/myJsonKey","_rev":"_e0nEe2y---","a":"Baz","b":64} +``` + +### Update a document + +Let's update the document: + +```java +doc.addAttribute("c", "Bar"); +System.out.println("Updating document ..."); +collection.updateDocument(key, doc); +``` + +### Read the document again + +Let's read the document again: + +```java +System.out.println("Reading updated 
document ..."); +BaseDocument updatedDocument = collection.getDocument(key, BaseDocument.class); +System.out.println("Key: " + updatedDocument.getKey()); +System.out.println("Attribute a: " + updatedDocument.getAttribute("a")); +System.out.println("Attribute b: " + updatedDocument.getAttribute("b")); +System.out.println("Attribute c: " + updatedDocument.getAttribute("c")); +``` + +After executing this program, the console output should look like this: + +```text +Key: myKey +Attribute a: Foo +Attribute b: 42 +Attribute c: Bar +``` + +### Delete a document + +Let's delete a document: + +```java +System.out.println("Deleting document ..."); +collection.deleteDocument(key); +``` + +### Execute AQL queries + +First, you need to create some documents with the name `Homer` in the +collection called `firstCollection`: + +```java +for (int i = 0; i < 10; i++) { + BaseDocument value = new BaseDocument(String.valueOf(i)); + value.addAttribute("name", "Homer"); + collection.insertDocument(value); +} +``` + +Get all documents with the name `Homer` from the collection using an AQL query +and iterate over the results: + +```java +String query = "FOR t IN firstCollection FILTER t.name == @name RETURN t"; +Map bindVars = Collections.singletonMap("name", "Homer"); +System.out.println("Executing read query ..."); +ArangoCursor cursor = db.query(query, bindVars, null, BaseDocument.class); +cursor.forEach(aDocument -> System.out.println("Key: " + aDocument.getKey())); +``` + +After executing this program, the console output should look something like this: + +```text +Key: 1 +Key: 0 +Key: 5 +Key: 3 +Key: 4 +Key: 9 +Key: 2 +Key: 7 +Key: 8 +Key: 6 +``` + +Some details you should know about the code: + +- The AQL query uses the placeholder `@name` that has to be bound to a value +- The `query()` method executes the defined query and returns an `ArangoCursor` + with the given class (here: `BaseDocument`) +- The order of the results is not guaranteed + +### Delete documents with AQL + +Delete 
previously created documents: + +```java +String query = "FOR t IN firstCollection FILTER t.name == @name " + + "REMOVE t IN firstCollection LET removed = OLD RETURN removed"; +Map bindVars = Collections.singletonMap("name", "Homer"); +System.out.println("Executing delete query ..."); +ArangoCursor cursor = db.query(query, bindVars, null, BaseDocument.class); +cursor.forEach(aDocument -> System.out.println("Removed document " + aDocument.getKey())); +``` + +After executing this program, the console output should look something like this: + +```text +Removed document 1 +Removed document 0 +Removed document 5 +Removed document 3 +Removed document 4 +Removed document 9 +Removed document 2 +Removed document 7 +Removed document 8 +Removed document 6 +``` + +### Learn more + +- Have a look at the [AQL documentation](../../../aql/) to learn about the + query language +- See [Serialization](reference-version-7/serialization.md) for details about + user-data serde +- For the full reference documentation, see + [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) + +## GraalVM Native Image + +The driver supports GraalVM Native Image compilation. +To compile with `--link-at-build-time` when `http-protocol` module is present in +the classpath, additional substitutions are required for transitive dependencies +`Netty` and `Vert.x`. See this +[example](https://github.com/arangodb/arangodb-java-driver/tree/main/test-functional/src/test-default/java/graal) +for reference. Such substitutions are not required when compiling the shaded driver. 
+ +### Framework compatibility + +The driver can be used in the following frameworks that support +GraalVM Native Image generation: + +- [Quarkus](https://quarkus.io), see [arango-quarkus-native-example](https://github.com/arangodb-helper/arango-quarkus-native-example) +- [Helidon](https://helidon.io), see [arango-helidon-native-example](https://github.com/arangodb-helper/arango-helidon-native-example) +- [Micronaut](https://micronaut.io), see [arango-micronaut-native-example](https://github.com/arangodb-helper/arango-micronaut-native-example) + +## ArangoDB Java Driver Shaded + +A shaded variant of the driver is also published with +Maven coordinates: `com.arangodb:arangodb-java-driver-shaded`. + +It bundles and relocates the following packages: +- `com.fasterxml.jackson` +- `com.arangodb.jackson.dataformat.velocypack` +- `io.vertx` +- `io.netty` + +Note that the **internal serde** internally uses Jackson classes from +`com.fasterxml.jackson` that are relocated to `com.arangodb.shaded.fasterxml.jackson`. +Therefore, the **internal serde** of the shaded driver is not compatible with +Jackson annotations and modules from package `com.fasterxml.jackson`, but only +with their relocated variants. In case the **internal serde** is used as +**user-data serde**, the annotations from package `com.arangodb.serde` can be +used to annotate fields, parameters, getters and setters for mapping values +representing ArangoDB documents metadata (`_id`, `_key`, `_rev`, `_from`, `_to`): +- `@InternalId` +- `@InternalKey` +- `@InternalRev` +- `@InternalFrom` +- `@InternalTo` + +These annotations are compatible with relocated Jackson classes. +Note that the **internal serde** is not part of the public API and could change +in future releases without notice, thus breaking client applications relying on +it to serialize or deserialize user-data. 
It is therefore recommended also in +this case either: +- using the default user-data serde `JacksonSerde` + (from packages `com.arangodb:jackson-serde-json` or `com.arangodb:jackson-serde-vpack`), or +- providing a custom user-data serde implementation via `ArangoDB.Builder.serde(ArangoSerde)`. + +## Support for extended naming constraints + +The driver supports ArangoDB's **extended** naming constraints/convention, +allowing most UTF-8 characters in the names of: +- Databases +- Collections +- Views +- Indexes + +These names must be NFC-normalized, otherwise the server returns an error. +To normalize a string, use the function +`com.arangodb.util.UnicodeUtils.normalize(String): String`: + +```java +String normalized = UnicodeUtils.normalize("π”Έπ•£π•’π•Ÿπ•˜π• π”»π”Ή"); +``` + +To check if a string is already normalized, use the +function `com.arangodb.util.UnicodeUtils.isNormalized(String): boolean`: + +```java +boolean isNormalized = UnicodeUtils.isNormalized("π”Έπ•£π•’π•Ÿπ•˜π• π”»π”Ή"); +``` + +## Async API + +The asynchronous API is accessible via `ArangoDB#async()`, for example: + +```java +ArangoDB adb = new ArangoDB.Builder() + // ... + .build(); +ArangoDBAsync adbAsync = adb.async(); +CompletableFuture version = adbAsync.getVersion(); +// ... +``` + +Under the hood, both synchronous and asynchronous API use the same internal +communication layer, which has been reworked and re-implemented in an +asynchronous way. The synchronous API blocks and waits for the result, while the +asynchronous one returns a `CompletableFuture<>` representing the pending +operation being performed. +Each asynchronous API method is equivalent to the corresponding synchronous +variant, except for the Cursor API. + +### Async Cursor API + +The Cursor API (`ArangoCursor` and `ArangoCursorAsync`) is intrinsically different, +because the synchronous Cursor API is based on Java's `java.util.Iterator`, which +is an interface only suitable for synchronous scenarios. 
+On the other side, the asynchronous Cursor API provides a method +`com.arangodb.ArangoCursorAsync#nextBatch()`, which returns a +`CompletableFuture>` and can be used to consume the next +batch of the cursor, for example: + +```java +CompletableFuture> future1 = adbAsync.db() + .query("FOR i IN i..10000", Integer.class); +CompletableFuture> future2 = future1 + .thenCompose(c -> { + List batch = c.getResult(); + // ... + // consume batch + // ... + return c.nextBatch(); + }); +// ... +``` + +## Data Definition Classes + +Classes used to exchange data definitions, in particular classes in the packages +`com.arangodb.entity.**` and `com.arangodb.model.**`, are meant to be serialized +and deserialized internally by the driver. + +The behavior to serialize and deserialize these classes is considered an internal +implementation detail, and as such, it might change without prior notice. +The API with regard to the public members of these classes is kept compatible. diff --git a/tutorial/gradle/build.gradle b/tutorial/gradle/build.gradle new file mode 100644 index 000000000..dca54df52 --- /dev/null +++ b/tutorial/gradle/build.gradle @@ -0,0 +1,24 @@ +plugins { + id 'java' + id 'application' +} + +group 'com.arangodb' +version '1.0-SNAPSHOT' + +repositories { + mavenLocal() + mavenCentral() +} + +dependencies { + implementation 'com.arangodb:arangodb-java-driver:7.22.0' +} + +ext { + javaMainClass = "FirstProject" +} + +application { + mainClassName = javaMainClass +} diff --git a/tutorial/gradle/settings.gradle b/tutorial/gradle/settings.gradle new file mode 100644 index 000000000..28160edab --- /dev/null +++ b/tutorial/gradle/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'arangodb-java-driver-tutorial-gradle' diff --git a/tutorial/gradle/src b/tutorial/gradle/src new file mode 120000 index 000000000..c927ed2aa --- /dev/null +++ b/tutorial/gradle/src @@ -0,0 +1 @@ +../maven/src \ No newline at end of file diff --git a/tutorial/maven/pom.xml b/tutorial/maven/pom.xml new 
file mode 100644 index 000000000..507adb245 --- /dev/null +++ b/tutorial/maven/pom.xml @@ -0,0 +1,26 @@ + + + 4.0.0 + + com.arangodb + arangodb-java-driver-tutorial-maven + 1.0-SNAPSHOT + + + 17 + 17 + 17 + UTF-8 + + + + + com.arangodb + arangodb-java-driver + 7.22.0 + + + + diff --git a/tutorial/maven/src/main/java/FirstProject.java b/tutorial/maven/src/main/java/FirstProject.java new file mode 100644 index 000000000..eab073c13 --- /dev/null +++ b/tutorial/maven/src/main/java/FirstProject.java @@ -0,0 +1,134 @@ +import com.arangodb.*; +import com.arangodb.entity.BaseDocument; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; + +import java.util.Collections; +import java.util.Map; + +public class FirstProject { + private static final ArangoDB arangoDB = new ArangoDB.Builder() + .host("172.28.0.1", 8529) + .password("test") + .build(); + + private static void cleanup() { + ArangoDatabase db = arangoDB.db("mydb"); + if (db.exists()) db.drop(); + } + + public static void main(String[] args) { + cleanup(); + + // Creating a database + ArangoDatabase db = arangoDB.db("mydb"); + System.out.println("Creating database..."); + db.create(); + + // Creating a collection + ArangoCollection collection = db.collection("firstCollection"); + System.out.println("Creating collection..."); + collection.create(); + + // Creating a document + String key = "myKey"; + BaseDocument doc = new BaseDocument(key); + doc.addAttribute("a", "Foo"); + doc.addAttribute("b", 42); + System.out.println("Inserting document..."); + collection.insertDocument(doc); + + // Read a document + { + System.out.println("Reading document..."); + BaseDocument readDocument = collection.getDocument(key, BaseDocument.class); + System.out.println("Key: " + readDocument.getKey()); + System.out.println("Attribute a: " + readDocument.getAttribute("a")); + System.out.println("Attribute b: " + readDocument.getAttribute("b")); + 
} + + // Creating a document from Jackson JsonNode + String keyJackson = "myJacksonKey"; + JsonNode jsonNode = JsonNodeFactory.instance.objectNode() + .put("_key", keyJackson) + .put("a", "Bar") + .put("b", 53); + System.out.println("Inserting document from Jackson JsonNode..."); + collection.insertDocument(jsonNode); + + // Read a document as Jackson JsonNode + { + System.out.println("Reading document as Jackson JsonNode..."); + JsonNode readJsonNode = collection.getDocument(keyJackson, JsonNode.class); + System.out.println("Key: " + readJsonNode.get("_key").textValue()); + System.out.println("Attribute a: " + readJsonNode.get("a").textValue()); + System.out.println("Attribute b: " + readJsonNode.get("b").intValue()); + } + + // Creating a document from JSON String + String keyJson = "myJsonKey"; + RawJson json = RawJson.of(""" + {"_key":"%s","a":"Baz","b":64} + """.formatted(keyJson)); + System.out.println("Inserting document from JSON String..."); + collection.insertDocument(json); + + // Read a document as JSON String + { + System.out.println("Reading document as JSON String..."); + RawJson readJson = collection.getDocument(keyJson, RawJson.class); + System.out.println(readJson.get()); + } + + // Update a document + { + doc.addAttribute("c", "Bar"); + System.out.println("Updating document ..."); + collection.updateDocument(key, doc); + } + + // Read the document again + { + System.out.println("Reading updated document ..."); + BaseDocument updatedDocument = collection.getDocument(key, BaseDocument.class); + System.out.println("Key: " + updatedDocument.getKey()); + System.out.println("Attribute a: " + updatedDocument.getAttribute("a")); + System.out.println("Attribute b: " + updatedDocument.getAttribute("b")); + System.out.println("Attribute c: " + updatedDocument.getAttribute("c")); + } + + // Delete a document + { + System.out.println("Deleting document ..."); + collection.deleteDocument(key); + } + + // Execute AQL queries + { + for (int i = 0; i < 10; i++) { 
+ BaseDocument value = new BaseDocument(String.valueOf(i)); + value.addAttribute("name", "Homer"); + collection.insertDocument(value); + } + + String query = "FOR t IN firstCollection FILTER t.name == @name RETURN t"; + Map bindVars = Collections.singletonMap("name", "Homer"); + System.out.println("Executing read query ..."); + ArangoCursor cursor = db.query(query, BaseDocument.class, bindVars); + cursor.forEach(aDocument -> System.out.println("Key: " + aDocument.getKey())); + } + + // Delete a document with AQL + { + String query = "FOR t IN firstCollection FILTER t.name == @name " + + "REMOVE t IN firstCollection LET removed = OLD RETURN removed"; + Map bindVars = Collections.singletonMap("name", "Homer"); + System.out.println("Executing delete query ..."); + ArangoCursor cursor = db.query(query, BaseDocument.class, bindVars); + cursor.forEach(aDocument -> System.out.println("Removed document " + aDocument.getKey())); + } + + arangoDB.shutdown(); + } +} diff --git a/vst-protocol/pom.xml b/vst-protocol/pom.xml new file mode 100644 index 000000000..b260d875a --- /dev/null +++ b/vst-protocol/pom.xml @@ -0,0 +1,35 @@ + + + 4.0.0 + + + ../release-parent + com.arangodb + release-parent + 7.22.0 + + + vst-protocol + vst-protocol + VST Protocol module for ArangoDB Java Driver + + + com.arangodb.vst + + + + + com.arangodb + core + provided + + + com.arangodb + velocypack + compile + + + + \ No newline at end of file diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstCommunication.java b/vst-protocol/src/main/java/com/arangodb/vst/VstCommunication.java new file mode 100644 index 000000000..c9150f5e1 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstCommunication.java @@ -0,0 +1,108 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst; + +import com.arangodb.ArangoDBException; +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Communication; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.HostHandler; +import com.arangodb.internal.util.ResponseUtils; +import com.arangodb.vst.internal.AuthenticationRequest; +import com.arangodb.vst.internal.JwtAuthenticationRequest; +import com.arangodb.vst.internal.VstConnectionAsync; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +/** + * @author Mark Vollmary + */ +@UnstableApi +public final class VstCommunication extends Communication { + private static final String ENCRYPTION_PLAIN = "plain"; + private static final String ENCRYPTION_JWT = "jwt"; + + private final String user; + private final String password; + private volatile String jwt; + + public VstCommunication(@UnstableApi final ArangoConfig config, @UnstableApi final HostHandler hostHandler) { + super(config, hostHandler); + user = config.getUser(); + password = config.getPassword(); + jwt = config.getJwt(); + } + + @Override + protected void connect(@UnstableApi Connection conn) throws IOException { + VstConnectionAsync connection = (VstConnectionAsync) conn; + if (!connection.isOpen()) { + connection.open(); + if (jwt != null || user != null) { + 
tryAuthenticate(connection); + } + } + } + + private void tryAuthenticate(final VstConnectionAsync connection) throws IOException { + try { + authenticate(connection); + } catch (final ArangoDBException authException) { + connection.close(); + throw authException; + } + } + + private void authenticate(final VstConnectionAsync connection) throws IOException { + InternalRequest authRequest; + if (jwt != null) { + authRequest = new JwtAuthenticationRequest(jwt, ENCRYPTION_JWT); + } else { + authRequest = new AuthenticationRequest(user, password != null ? password : "", ENCRYPTION_PLAIN); + } + + InternalResponse response; + try { + response = connection.executeAsync(authRequest).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw ArangoDBException.of(e); + } catch (ExecutionException e) { + throw new IOException(e.getCause()); + } + checkError(response); + } + + + private void checkError(final InternalResponse response) { + ArangoDBException e = ResponseUtils.translateError(serde, response); + if (e != null) throw e; + } + + public void setJwt(String jwt) { + this.jwt = jwt; + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java b/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java new file mode 100644 index 000000000..1db7852a0 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java @@ -0,0 +1,43 @@ +/* + * DISCLAIMER + * + * Copyright 2018 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.ConnectionPool; +import com.arangodb.vst.internal.VstConnectionAsync; + +/** + * @author Mark Vollmary + */ +@UnstableApi +public class VstConnectionFactoryAsync implements ConnectionFactory { + + @Override + @UnstableApi + public Connection create(@UnstableApi final ArangoConfig config, final HostDescription host, @UnstableApi final ConnectionPool pool) { + return new VstConnectionAsync(config, host, pool); + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstModule.java b/vst-protocol/src/main/java/com/arangodb/vst/VstModule.java new file mode 100644 index 000000000..255a041fd --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstModule.java @@ -0,0 +1,26 @@ +package com.arangodb.vst; + +import com.arangodb.vst.internal.AuthenticationRequest; +import com.arangodb.vst.internal.JwtAuthenticationRequest; +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.module.SimpleModule; + +import java.util.function.Supplier; + +enum VstModule implements Supplier { + INSTANCE; + + private final SimpleModule module; + + VstModule() { + module = new SimpleModule(); + module.addSerializer(AuthenticationRequest.class, VstSerializers.AUTHENTICATION_REQUEST); + module.addSerializer(JwtAuthenticationRequest.class, VstSerializers.JWT_AUTHENTICATION_REQUEST); + } + + @Override + public Module get() { + return module; + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstProtocol.java b/vst-protocol/src/main/java/com/arangodb/vst/VstProtocol.java 
new file mode 100644 index 000000000..7d840a8ab --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstProtocol.java @@ -0,0 +1,72 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst; + +import com.arangodb.ArangoDBException; +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.HostHandle; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * @author Mark Vollmary + */ +@UnstableApi +public class VstProtocol implements CommunicationProtocol { + + private final VstCommunication communication; + private final ExecutorService outgoingExecutor = Executors.newCachedThreadPool(); + + public VstProtocol(final VstCommunication communication) { + super(); + this.communication = communication; + } + + @Override + @UnstableApi + public CompletableFuture executeAsync(@UnstableApi InternalRequest request, @UnstableApi HostHandle hostHandle) { + if (outgoingExecutor.isShutdown()) { + CompletableFuture cf = new CompletableFuture<>(); + cf.completeExceptionally(new ArangoDBException("VstProtocol already 
closed!")); + return cf; + } + return CompletableFuture.completedFuture(null) + .thenComposeAsync(__ -> communication.executeAsync(request, hostHandle), outgoingExecutor); + } + + @Override + public void setJwt(String jwt) { + communication.setJwt(jwt); + } + + @Override + public void close() throws IOException { + outgoingExecutor.shutdown(); + communication.close(); + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstProtocolProvider.java b/vst-protocol/src/main/java/com/arangodb/vst/VstProtocolProvider.java new file mode 100644 index 000000000..274cd0284 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstProtocolProvider.java @@ -0,0 +1,36 @@ +package com.arangodb.vst; + +import com.arangodb.Protocol; +import com.arangodb.arch.UnstableApi; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.CommunicationProtocol; +import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.HostHandler; +import com.arangodb.internal.net.ProtocolProvider; +import com.fasterxml.jackson.databind.Module; + +@UnstableApi +public class VstProtocolProvider implements ProtocolProvider { + @Override + public boolean supportsProtocol(Protocol protocol) { + return Protocol.VST.equals(protocol); + } + + @Override + @UnstableApi + public ConnectionFactory createConnectionFactory() { + return new VstConnectionFactoryAsync(); + } + + @Override + @UnstableApi + public CommunicationProtocol createProtocol(@UnstableApi ArangoConfig config, @UnstableApi HostHandler hostHandler) { + return new VstProtocol(new VstCommunication(config, hostHandler)); + } + + @Override + public Module protocolModule() { + return VstModule.INSTANCE.get(); + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstSerializers.java b/vst-protocol/src/main/java/com/arangodb/vst/VstSerializers.java new file mode 100644 index 000000000..2ba762a52 --- /dev/null +++ 
b/vst-protocol/src/main/java/com/arangodb/vst/VstSerializers.java @@ -0,0 +1,41 @@ +package com.arangodb.vst; + +import com.arangodb.arch.UnstableApi; +import com.arangodb.vst.internal.AuthenticationRequest; +import com.arangodb.vst.internal.JwtAuthenticationRequest; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; + +public final class VstSerializers { + + static final JsonSerializer AUTHENTICATION_REQUEST = + new JsonSerializer() { + @Override + public void serialize(@UnstableApi AuthenticationRequest value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeStartArray(); + gen.writeNumber(value.getVersion()); + gen.writeNumber(value.getType()); + gen.writeString(value.getEncryption()); + gen.writeString(value.getUser()); + gen.writeString(value.getPassword()); + gen.writeEndArray(); + } + }; + static final JsonSerializer JWT_AUTHENTICATION_REQUEST = + new JsonSerializer() { + @Override + public void serialize(@UnstableApi JwtAuthenticationRequest value, JsonGenerator gen, + SerializerProvider serializers) throws IOException { + gen.writeStartArray(); + gen.writeNumber(value.getVersion()); + gen.writeNumber(value.getType()); + gen.writeString(value.getEncryption()); + gen.writeString(value.getToken()); + gen.writeEndArray(); + } + }; + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/AuthenticationRequest.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/AuthenticationRequest.java new file mode 100644 index 000000000..8a0e11288 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/AuthenticationRequest.java @@ -0,0 +1,56 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.internal.InternalRequest; + +/** + * @author Mark Vollmary + */ +@UsedInApi +public class AuthenticationRequest extends InternalRequest { + + private final String user; + private final String password; + private final String encryption;// "plain" + + public AuthenticationRequest(final String user, final String password, final String encryption) { + super(null, null, null); + this.user = user; + this.password = password; + this.encryption = encryption; + setType(1000); + } + + public String getUser() { + return user; + } + + public String getPassword() { + return password; + } + + public String getEncryption() { + return encryption; + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/Chunk.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/Chunk.java new file mode 100644 index 000000000..93e2d7848 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/Chunk.java @@ -0,0 +1,88 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +/** + * @author Mark Vollmary + */ +public class Chunk { + + private final long messageId; + private final long messageLength; + private final int chunkX; + private final int contentOffset; + private final int contentLength; + + public Chunk(final long messageId, final int chunkX, final long messageLength, final int contentOffset, + final int contentLength) { + this.messageId = messageId; + this.chunkX = chunkX; + this.messageLength = messageLength; + this.contentOffset = contentOffset; + this.contentLength = contentLength; + } + + public Chunk(final long messageId, final int chunkIndex, final int numberOfChunks, final long messageLength, + final int contentOffset, final int contentLength) { + this(messageId, chunkX(chunkIndex, numberOfChunks), messageLength, contentOffset, contentLength); + } + + private static int chunkX(final int chunkIndex, final int numberOfChunks) { + int chunkX; + if (numberOfChunks == 1) { + chunkX = 3;// last byte: 0000 0011 + } else if (chunkIndex == 0) { + chunkX = (numberOfChunks << 1) + 1; + } else { + chunkX = chunkIndex << 1; + } + return chunkX; + } + + public long getMessageId() { + return messageId; + } + + public long getMessageLength() { + return messageLength; + } + + public boolean isFirstChunk() { + return 1 == (chunkX & 0x1); + } + + public int getChunk() { + return chunkX >> 1; + } + + public int getChunkX() { + return chunkX; + } + + public int getContentOffset() { + return contentOffset; + } + + 
public int getContentLength() { + return contentLength; + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/ChunkStore.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/ChunkStore.java new file mode 100644 index 000000000..a945ec79d --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/ChunkStore.java @@ -0,0 +1,70 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/** + * @author Mark Vollmary + */ +public class ChunkStore { + + private final MessageStore messageStore; + private final Map data; + + public ChunkStore(final MessageStore messageStore) { + super(); + this.messageStore = messageStore; + data = new HashMap<>(); + } + + public ByteBuffer storeChunk(final Chunk chunk) throws BufferUnderflowException, IndexOutOfBoundsException { + final long messageId = chunk.getMessageId(); + ByteBuffer chunkBuffer = data.get(messageId); + if (chunkBuffer == null) { + if (!chunk.isFirstChunk()) { + messageStore.cancel(messageId); + return null; + } + final int length = (int) (chunk.getMessageLength() > 0 ? 
chunk.getMessageLength() + : chunk.getContentLength()); + chunkBuffer = ByteBuffer.allocate(length); + data.put(messageId, chunkBuffer); + } + return chunkBuffer; + } + + public void checkCompleteness(final long messageId) { + checkCompleteness(messageId, data.get(messageId)); + } + + private void checkCompleteness(final long messageId, final ByteBuffer chunkBuffer) + throws BufferUnderflowException, IndexOutOfBoundsException { + if (chunkBuffer.position() == chunkBuffer.limit()) { + messageStore.consume(new Message(messageId, chunkBuffer.array())); + data.remove(messageId); + } + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/JwtAuthenticationRequest.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/JwtAuthenticationRequest.java new file mode 100644 index 000000000..726057c16 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/JwtAuthenticationRequest.java @@ -0,0 +1,27 @@ +package com.arangodb.vst.internal; + +import com.arangodb.arch.UsedInApi; +import com.arangodb.internal.InternalRequest; + +@UsedInApi +public class JwtAuthenticationRequest extends InternalRequest { + + private final String token; + private final String encryption; // "jwt" + + public JwtAuthenticationRequest(final String token, final String encryption) { + super(null, null, null); + this.token = token; + this.encryption = encryption; + setType(1000); + } + + public String getToken() { + return token; + } + + public String getEncryption() { + return encryption; + } + +} \ No newline at end of file diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/Message.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/Message.java new file mode 100644 index 000000000..7b673fb68 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/Message.java @@ -0,0 +1,67 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import com.arangodb.velocypack.VPackSlice; + +import java.nio.BufferUnderflowException; + +/** + * @author Mark Vollmary + */ +public class Message { + + private final long id; + private final VPackSlice head; + private final VPackSlice body; + + public Message(final long id, final byte[] chunkBuffer) throws BufferUnderflowException, IndexOutOfBoundsException { + super(); + this.id = id; + head = new VPackSlice(chunkBuffer); + final int headSize = head.getByteSize(); + if (chunkBuffer.length > headSize) { + body = new VPackSlice(chunkBuffer, headSize); + } else { + body = null; + } + } + + public Message(final long id, final byte[] head, final byte[] body) { + super(); + this.id = id; + this.head = new VPackSlice(head); + this.body = body != null ? 
new VPackSlice(body) : null; + } + + public long getId() { + return id; + } + + public VPackSlice getHead() { + return head; + } + + public VPackSlice getBody() { + return body; + } + +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/MessageStore.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/MessageStore.java new file mode 100644 index 000000000..1d8d6effb --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/MessageStore.java @@ -0,0 +1,98 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import com.arangodb.ArangoDBException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.FutureTask; + +/** + * @author Mark Vollmary + */ +public class MessageStore { + + private static final Logger LOGGER = LoggerFactory.getLogger(MessageStore.class); + + private final Map> task; + private final Map response; + private final Map error; + + public MessageStore() { + super(); + task = new ConcurrentHashMap<>(); + response = new ConcurrentHashMap<>(); + error = new ConcurrentHashMap<>(); + } + + public void storeMessage(final long messageId, final FutureTask future) { + task.put(messageId, future); + } + + public void consume(final Message message) { + final FutureTask future = task.remove(message.getId()); + if (future != null) { + response.put(message.getId(), message); + future.run(); + } + } + + public Message get(final long messageId) { + final Message result = response.remove(messageId); + if (result == null) { + final Exception e = error.remove(messageId); + if (e != null) { + throw ArangoDBException.of(e); + } + } + return result; + } + + public void cancel(final long messageId) { + final FutureTask future = task.remove(messageId); + if (future != null) { + LOGGER.error("Cancel Message unexpected (id={}).", messageId); + future.cancel(true); + } + } + + public synchronized void clear(final Exception e) { + if (!task.isEmpty()) { + LOGGER.error(e.getMessage(), e); + } + for (final Entry> entry : task.entrySet()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Exceptionally complete Message (id=%s).", entry.getKey())); + } + error.put(entry.getKey(), e); + entry.getValue().run(); + } + task.clear(); + } + + public boolean isEmpty() { + return task.isEmpty(); + } +} diff --git 
a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java new file mode 100644 index 000000000..870051fa6 --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java @@ -0,0 +1,350 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import com.arangodb.ArangoDBException; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.ArangoDefaults; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionPool; +import com.arangodb.velocypack.VPackBuilder; +import com.arangodb.velocypack.VPackSlice; +import com.arangodb.velocypack.ValueType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocket; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.Map; +import 
java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author Mark Vollmary + */ +public abstract class VstConnection implements Connection { + private static final Logger LOGGER = LoggerFactory.getLogger(VstConnection.class); + private static final AtomicInteger THREAD_COUNT = new AtomicInteger(); + private static final byte[] PROTOCOL_HEADER = "VST/1.0\r\n\r\n".getBytes(); + protected final MessageStore messageStore = new MessageStore(); + protected final Integer timeout; + private final AtomicLong keepAliveId = new AtomicLong(); + private final Long ttl; + private final Integer keepAliveInterval; + private final Boolean useSsl; + private final SSLContext sslContext; + private final HostDescription host; + private final Map sendTimestamps = new ConcurrentHashMap<>(); + private final String connectionName; + private final ConnectionPool pool; + private final byte[] keepAliveRequest = new VPackBuilder() + .add(ValueType.ARRAY) + .add(1) + .add(1) + .add("_system") + .add(1) + .add("/_admin/server/availability") + .add(ValueType.OBJECT) + .close() + .add(ValueType.OBJECT) + .close() + .close() + .slice() + .toByteArray(); + private ExecutorService executor; + private ScheduledExecutorService keepAliveScheduler; + private int keepAliveFailCounter = 0; + private Socket socket; + private OutputStream outputStream; + private InputStream inputStream; + + protected VstConnection(final ArangoConfig config, final HostDescription host, final ConnectionPool pool) { + super(); + timeout = config.getTimeout(); + ttl = config.getConnectionTtl(); + keepAliveInterval = config.getKeepAliveInterval(); + useSsl = config.getUseSsl(); + sslContext = config.getSslContext(); + this.host = host; + this.pool = pool; + + connectionName = "connection_" + System.currentTimeMillis() + "_" + Math.random(); + LOGGER.debug("[" + connectionName + "]: Connection created"); + } + + protected T sendKeepAlive() { + long id 
= keepAliveId.decrementAndGet(); + Message message = new Message(id, keepAliveRequest, null); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Send keepalive probe (id=%s, head=%s, body=%s)", connectionName, + message.getId(), message.getHead(), + message.getBody() != null ? message.getBody() : "{}")); + } + return write(message, Collections.singleton(new Chunk( + id, 0, 1, -1, + 0, keepAliveRequest.length + ))); + } + + public abstract T write(final Message message, final Collection chunks); + + protected abstract void doKeepAlive(); + + private void keepAlive() { + try { + doKeepAlive(); + keepAliveFailCounter = 0; + } catch (Exception e) { + LOGGER.error("Got exception while performing keepAlive request:", e); + keepAliveFailCounter++; + if (keepAliveFailCounter >= 3) { + LOGGER.error("KeepAlive request failed consecutively for 3 times, closing connection now..."); + messageStore.clear(new IOException("Connection unresponsive!")); + close(); + } + } + } + + public boolean isOpen() { + return socket != null && socket.isConnected() && !socket.isClosed(); + } + + public synchronized void open() throws IOException { + if (isOpen()) { + return; + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Open connection to %s", connectionName, host)); + } + if (Boolean.TRUE.equals(useSsl)) { + socket = sslContext.getSocketFactory().createSocket(); + } else { + socket = SocketFactory.getDefault().createSocket(); + } + socket.connect(new InetSocketAddress(host.getHost(), host.getPort()), timeout); + socket.setKeepAlive(true); + socket.setTcpNoDelay(true); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Connected to %s", connectionName, socket)); + } + + outputStream = new BufferedOutputStream(socket.getOutputStream()); + inputStream = socket.getInputStream(); + + if (Boolean.TRUE.equals(useSsl)) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Start Handshake on %s", connectionName, socket)); + 
} + ((SSLSocket) socket).startHandshake(); + } + sendProtocolHeader(); + + executor = Executors.newSingleThreadExecutor(r -> { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + t.setName("adb-vst-" + THREAD_COUNT.getAndIncrement()); + return t; + }); + executor.submit((Callable) () -> { + LOGGER.debug("[" + connectionName + "]: Start Callable"); + + final long openTime = new Date().getTime(); + final Long ttlTime = ttl != null && ttl > 0 ? openTime + ttl : null; + final ChunkStore chunkStore = new ChunkStore(messageStore); + while (true) { + if (ttlTime != null && new Date().getTime() > ttlTime && messageStore.isEmpty()) { + close(); + break; + } + if (!isOpen()) { + messageStore.clear(new IOException("The socket is closed.")); + close(); + break; + } + try { + final Chunk chunk = readChunk(); + final ByteBuffer chunkBuffer = chunkStore.storeChunk(chunk); + if (chunkBuffer != null) { + final byte[] buf = new byte[chunk.getContentLength()]; + readBytesIntoBuffer(buf, 0, buf.length); + chunkBuffer.put(buf); + chunkStore.checkCompleteness(chunk.getMessageId()); + } + } catch (final Exception e) { + messageStore.clear(e); + close(); + break; + } + } + + LOGGER.debug("[" + connectionName + "]: Stop Callable"); + + return null; + }); + + if (keepAliveInterval != null) { + keepAliveScheduler = Executors.newScheduledThreadPool(1); + keepAliveScheduler.scheduleAtFixedRate(this::keepAlive, 0, keepAliveInterval, TimeUnit.SECONDS); + } + + } + + @Override + public synchronized void close() { + if (keepAliveScheduler != null) { + keepAliveScheduler.shutdown(); + } + messageStore.clear(new IOException("Connection closed")); + if (executor != null && !executor.isShutdown()) { + executor.shutdown(); + } + if (socket != null && !socket.isClosed()) { + try { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Close connection %s", connectionName, socket)); + } + socket.close(); + } catch (final IOException e) { + throw 
ArangoDBException.of(e); + } + } + } + + @Override + public void release() { + pool.release(this); + } + + private synchronized void sendProtocolHeader() throws IOException { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Send velocystream protocol header to %s", connectionName, socket)); + } + outputStream.write(PROTOCOL_HEADER); + outputStream.flush(); + } + + protected synchronized void writeIntern(final Message message, final Collection chunks) { + for (final Chunk chunk : chunks) { + try { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Send chunk %s:%s from message %s", connectionName, + chunk.getChunk(), + chunk.isFirstChunk() ? 1 : 0, chunk.getMessageId())); + sendTimestamps.put(chunk.getMessageId(), System.currentTimeMillis()); + } + writeChunkHead(chunk); + final int contentOffset = chunk.getContentOffset(); + final int contentLength = chunk.getContentLength(); + final VPackSlice head = message.getHead(); + final int headLength = head.getByteSize(); + int written = 0; + if (contentOffset < headLength) { + written = Math.min(contentLength, headLength - contentOffset); + outputStream.write(head.getBuffer(), contentOffset, written); + } + if (written < contentLength) { + final VPackSlice body = message.getBody(); + outputStream.write(body.getBuffer(), contentOffset + written - headLength, contentLength - written); + } + outputStream.flush(); + } catch (final IOException e) { + LOGGER.error("Error on Connection " + connectionName); + throw ArangoDBException.of(e); + } + } + } + + private synchronized void writeChunkHead(final Chunk chunk) throws IOException { + final long messageLength = chunk.getMessageLength(); + final int headLength = messageLength > -1L ? 
ArangoDefaults.CHUNK_MAX_HEADER_SIZE + : ArangoDefaults.CHUNK_MIN_HEADER_SIZE; + final int length = chunk.getContentLength() + headLength; + final ByteBuffer buffer = ByteBuffer.allocate(headLength).order(ByteOrder.LITTLE_ENDIAN); + buffer.putInt(length); + buffer.putInt(chunk.getChunkX()); + buffer.putLong(chunk.getMessageId()); + if (messageLength > -1L) { + buffer.putLong(messageLength); + } + outputStream.write(buffer.array()); + } + + protected Chunk readChunk() throws IOException { + final ByteBuffer chunkHeadBuffer = readBytes(ArangoDefaults.CHUNK_MIN_HEADER_SIZE); + final int length = chunkHeadBuffer.getInt(); + final int chunkX = chunkHeadBuffer.getInt(); + final long messageId = chunkHeadBuffer.getLong(); + final long messageLength; + final int contentLength; + if ((1 == (chunkX & 0x1)) && ((chunkX >> 1) > 1)) { + messageLength = readBytes(ArangoDefaults.LONG_BYTES).getLong(); + contentLength = length - ArangoDefaults.CHUNK_MAX_HEADER_SIZE; + } else { + messageLength = -1L; + contentLength = length - ArangoDefaults.CHUNK_MIN_HEADER_SIZE; + } + final Chunk chunk = new Chunk(messageId, chunkX, messageLength, 0, contentLength); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("[%s]: Received chunk %s:%s from message %s", connectionName, chunk.getChunk() + , chunk.isFirstChunk() ? 
1 : 0, chunk.getMessageId())); + LOGGER.debug("[" + connectionName + "]: Responsetime for Message " + chunk.getMessageId() + " is " + (System.currentTimeMillis() - sendTimestamps.get(chunk.getMessageId()))); + } + + return chunk; + } + + private ByteBuffer readBytes(final int len) throws IOException { + final byte[] buf = new byte[len]; + readBytesIntoBuffer(buf, 0, len); + return ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); + } + + protected void readBytesIntoBuffer(final byte[] buf, final int off, final int len) throws IOException { + for (int readed = 0; readed < len; ) { + final int read = inputStream.read(buf, off + readed, len - readed); + if (read == -1) { + throw new IOException("Reached the end of the stream."); + } else { + readed += read; + } + } + } + + @Override + public void setJwt(String jwt) { + // no-op: VST connections send jwt token only at initialization time + } +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java new file mode 100644 index 000000000..5b128340e --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java @@ -0,0 +1,161 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.vst.internal; + +import com.arangodb.PackageVersion; +import com.arangodb.config.HostDescription; +import com.arangodb.internal.InternalRequest; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.ConnectionPool; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.velocypack.VPackSlice; +import com.arangodb.velocypack.exception.VPackParserException; +import com.arangodb.vst.internal.utils.CompletableFutureUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author Mark Vollmary + */ +public class VstConnectionAsync extends VstConnection> { + private final static Logger LOGGER = LoggerFactory.getLogger(VstConnectionAsync.class); + private static final AtomicLong mId = new AtomicLong(0L); + private static final String X_ARANGO_DRIVER = "JavaDriver/" + PackageVersion.VERSION + " (JVM/" + System.getProperty("java.specification.version") + ")"; + private final Integer chunkSize; + private final InternalSerde serde; + + + public VstConnectionAsync(final ArangoConfig config, final HostDescription host, final ConnectionPool pool) { + super(config, host, pool); + chunkSize = config.getChunkSize(); + serde = config.getInternalSerde(); + } + + @Override + public synchronized CompletableFuture write(final Message message, final Collection chunks) { + final CompletableFuture future = new CompletableFuture<>(); + final FutureTask task = new FutureTask<>(() -> { + try { + future.complete(messageStore.get(message.getId())); + } catch (final Exception e) { + 
future.completeExceptionally(e); + } + return null; + }); + messageStore.storeMessage(message.getId(), task); + super.writeIntern(message, chunks); + if (timeout == null || timeout == 0L) { + return future; + } else { + return CompletableFutureUtils.orTimeout(future, timeout, TimeUnit.MILLISECONDS); + } + } + + @Override + protected void doKeepAlive() { + sendKeepAlive().join(); + } + + @Override + public CompletableFuture executeAsync(final InternalRequest request) { + // TODO: refactor using Future composition + final CompletableFuture rfuture = new CompletableFuture<>(); + try { + final Message message = createMessage(request); + send(message).whenComplete((m, ex) -> { + if (m != null) { + final InternalResponse response; + try { + response = createResponse(m); + } catch (final Exception e) { + rfuture.completeExceptionally(e); + return; + } + rfuture.complete(response); + } else { + Throwable e = ex instanceof CompletionException ? ex.getCause() : ex; + rfuture.completeExceptionally(e); + } + }); + } catch (Exception e) { + LOGGER.error(e.getMessage(), e); + rfuture.completeExceptionally(e); + } + return rfuture; + } + + private Message createMessage(final InternalRequest request) throws VPackParserException { + request.putHeaderParam("accept", "application/x-velocypack"); + request.putHeaderParam("content-type", "application/x-velocypack"); + request.putHeaderParam("x-arango-driver", X_ARANGO_DRIVER); + final long id = mId.incrementAndGet(); + return new Message(id, serde.serialize(request), request.getBody()); + } + + private CompletableFuture send(final Message message) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Send Message (id=%s, head=%s, body=%s)", + message.getId(), + serde.toJsonString(message.getHead().toByteArray()), + message.getBody() != null ? 
serde.toJsonString(message.getBody().toByteArray()) : "{}")); + } + return write(message, buildChunks(message)); + } + + private Collection buildChunks(final Message message) { + final Collection chunks = new ArrayList<>(); + final VPackSlice head = message.getHead(); + int size = head.getByteSize(); + final VPackSlice body = message.getBody(); + if (body != null) { + size += body.getByteSize(); + } + final int n = size / chunkSize; + final int numberOfChunks = (size % chunkSize != 0) ? (n + 1) : n; + int off = 0; + for (int i = 0; size > 0; i++) { + final int len = Math.min(chunkSize, size); + final long messageLength = (i == 0 && numberOfChunks > 1) ? size : -1L; + final Chunk chunk = new Chunk(message.getId(), i, numberOfChunks, messageLength, off, len); + size -= len; + off += len; + chunks.add(chunk); + } + return chunks; + } + + private InternalResponse createResponse(final Message message) throws VPackParserException { + InternalResponse response = serde.deserialize(message.getHead().toByteArray(), InternalResponse.class); + if (message.getBody() != null) { + response.setBody(message.getBody().toByteArray()); + } + return response; + } +} diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/utils/CompletableFutureUtils.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/utils/CompletableFutureUtils.java new file mode 100644 index 000000000..2321ac5aa --- /dev/null +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/utils/CompletableFutureUtils.java @@ -0,0 +1,24 @@ +package com.arangodb.vst.internal.utils; + +import java.util.concurrent.*; + +public class CompletableFutureUtils { + + private CompletableFutureUtils() { + } + + private static final ScheduledExecutorService timeoutScheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + return t; + } + ); + + public static CompletableFuture orTimeout(CompletableFuture completableFuture, long 
timeout, TimeUnit unit) { + ScheduledFuture timeoutTask = timeoutScheduler.schedule(() -> + completableFuture.completeExceptionally(new TimeoutException()), timeout, unit); + completableFuture.whenComplete((v, e) -> timeoutTask.cancel(false)); + return completableFuture; + } + +} diff --git a/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/native-image.properties b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/native-image.properties new file mode 100644 index 000000000..f6d4bf39a --- /dev/null +++ b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/native-image.properties @@ -0,0 +1,3 @@ +Args=\ +-H:ResourceConfigurationResources=${.}/resource-config-spi.json \ +-H:ReflectionConfigurationResources=${.}/reflect-config-spi.json diff --git a/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/reflect-config-spi.json b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/reflect-config-spi.json new file mode 100644 index 000000000..57c85201e --- /dev/null +++ b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/reflect-config-spi.json @@ -0,0 +1,11 @@ +[ + { + "name": "com.arangodb.vst.VstProtocolProvider", + "methods": [ + { + "name": "", + "parameterTypes": [] + } + ] + } +] diff --git a/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/resource-config-spi.json b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/resource-config-spi.json new file mode 100644 index 000000000..9037d85e5 --- /dev/null +++ b/vst-protocol/src/main/resources/META-INF/native-image/com.arangodb/vst-protocol/resource-config-spi.json @@ -0,0 +1,10 @@ +{ + "resources": { + "includes": [ + { + "pattern": "META-INF/services/com.arangodb.internal.net.ProtocolProvider" + } + ] + }, + "bundles": [] +} diff --git 
a/vst-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider b/vst-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider new file mode 100644 index 000000000..6d11ca0c7 --- /dev/null +++ b/vst-protocol/src/main/resources/META-INF/services/com.arangodb.internal.net.ProtocolProvider @@ -0,0 +1 @@ +com.arangodb.vst.VstProtocolProvider